/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/context_tracking_state.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);
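
/*
 * Usage sketch (hypothetical, not part of this header): a driver
 * typically quiesces its interrupt source and then calls
 * synchronize_irq() to wait for any in-flight handler on another CPU
 * before tearing resources down. The names below are made up.
 *
 *	static void mydrv_shutdown(struct mydrv *drv)
 *	{
 *		mydrv_mask_device_irqs(drv);	// stop new interrupts
 *		synchronize_irq(drv->irq);	// wait out running handlers
 *		mydrv_free_buffers(drv);	// now safe to free
 *	}
 */
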
#ifdef CONFIG_NO_HZ_FULL
void __rcu_irq_enter_check_tick(void);
#else
static inline void __rcu_irq_enter_check_tick(void) { }
#endif

static __always_inline void rcu_irq_enter_check_tick(void)
{
	if (context_tracking_enabled())
		__rcu_irq_enter_check_tick();
}
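
/*
 * Usage sketch (hypothetical): interrupt entry code calls
 * rcu_irq_enter_check_tick() when an irq is taken while RCU is already
 * watching, so a NO_HZ_FULL CPU can restart the tick if it now needs
 * one. The context_tracking_enabled() check above keeps this a cheap
 * no-op when context tracking is disabled.
 *
 *	// in irq entry, kernel-mode path, RCU already watching:
 *	rcu_irq_enter_check_tick();
 */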

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
		account_hardirq_enter(current);		\
	} while (0)

/*
 * Like __irq_enter() without time accounting for fast
 * interrupts, e.g. reschedule IPI where time accounting
 * is more expensive than the actual interrupt.
 */
#define __irq_enter_raw()				\
	do {						\
		preempt_count_add(HARDIRQ_OFFSET);	\
		lockdep_hardirq_enter();		\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
void irq_enter(void);
/*
 * Like irq_enter(), but RCU is already watching.
 */
void irq_enter_rcu(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		account_hardirq_exit(current);		\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Like __irq_exit() without time accounting
 */
#define __irq_exit_raw()				\
	do {						\
		lockdep_hardirq_exit();			\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)
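
/*
 * Usage sketch (hypothetical): the raw variants must stay balanced
 * around the handler, e.g. for an IPI whose time accounting would cost
 * more than the work itself:
 *
 *	__irq_enter_raw();
 *	handle_ipi(regs);	// hypothetical fast handler
 *	__irq_exit_raw();
 */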

/*
 * Exit irq context and process softirqs if needed:
 */
void irq_exit(void);

/*
 * Like irq_exit(), but return with RCU watching.
 */
void irq_exit_rcu(void);
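
/*
 * Usage sketch (hypothetical): architecture interrupt entry code
 * brackets the handler with irq_enter()/irq_exit(); irq_exit() also
 * runs any pending softirqs on the way out.
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	// dispatch to the registered handler
 *	irq_exit();			// may process softirqs
 */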

#ifndef arch_nmi_enter
#define arch_nmi_enter()	do { } while (0)
#define arch_nmi_exit()		do { } while (0)
#endif

/*
 * NMI vs Tracing
 * --------------
 *
 * We must not land in a tracer until (or after) we've changed preempt_count
 * such that in_nmi() becomes true. To that effect all NMI C entry points must
 * be marked 'notrace' and call nmi_enter() as soon as possible.
 */

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */
#define __nmi_enter()						\
	do {							\
		lockdep_off();					\
		arch_nmi_enter();				\
		BUG_ON(in_nmi() == NMI_MASK);			\
		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
	} while (0)

#define nmi_enter()						\
	do {							\
		__nmi_enter();					\
		lockdep_hardirq_enter();			\
		ct_nmi_enter();					\
		instrumentation_begin();			\
		ftrace_nmi_enter();				\
		instrumentation_end();				\
	} while (0)

#define __nmi_exit()						\
	do {							\
		BUG_ON(!in_nmi());				\
		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		arch_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#define nmi_exit()						\
	do {							\
		instrumentation_begin();			\
		ftrace_nmi_exit();				\
		instrumentation_end();				\
		ct_nmi_exit();					\
		lockdep_hardirq_exit();				\
		__nmi_exit();					\
	} while (0)
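
/*
 * Usage sketch (hypothetical): per the "NMI vs Tracing" rules above, an
 * NMI C entry point is marked notrace and calls nmi_enter() before any
 * traceable work, with a matching nmi_exit() on the way out:
 *
 *	notrace void arch_handle_nmi(struct pt_regs *regs)
 *	{
 *		nmi_enter();		// in_nmi() is true from here on
 *		do_nmi_work(regs);	// hypothetical handler body
 *		nmi_exit();
 *	}
 */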

#endif /* LINUX_HARDIRQ_H */