/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_IRQENTRYCOMMON_H
#define __LINUX_IRQENTRYCOMMON_H

#include <linux/static_call_types.h>
#include <linux/syscalls.h>
#include <linux/context_tracking.h>
#include <linux/tick.h>
#include <linux/kmsan.h>
#include <linux/unwind_deferred.h>

#include <asm/entry-common.h>

/*
 * Define dummy _TIF work flags if not defined by the architecture or for
 * disabled functionality.
 */
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING		(0)
#endif

/*
 * TIF flags handled in exit_to_user_mode_loop()
 */
#ifndef ARCH_EXIT_TO_USER_MODE_WORK
# define ARCH_EXIT_TO_USER_MODE_WORK		(0)
#endif

#define EXIT_TO_USER_MODE_WORK						\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
	 _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY |			\
	 _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |			\
	 ARCH_EXIT_TO_USER_MODE_WORK)
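
/*
 * Illustrative sketch, not part of this header: an architecture can feed
 * additional TIF bits into the work check by defining
 * ARCH_EXIT_TO_USER_MODE_WORK in its asm/entry-common.h, which overrides
 * the (0) fallback above. The _TIF_ARCH_WORK flag is hypothetical:
 *
 *	#define ARCH_EXIT_TO_USER_MODE_WORK	(_TIF_ARCH_WORK)
 *
 * The bits are then OR'ed into EXIT_TO_USER_MODE_WORK and handled via
 * arch_exit_to_user_mode_work() in exit_to_user_mode_loop().
 */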

/**
 * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
 * @regs:	Pointer to current's pt_regs
 *
 * Defaults to an empty implementation. Can be replaced by architecture
 * specific code.
 *
 * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
 * section. Use __always_inline so the compiler cannot push it out of line
 * and make it instrumentable.
 */
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);

#ifndef arch_enter_from_user_mode
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif
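
/*
 * Illustrative sketch, not part of this header: an architecture replaces
 * the empty default by providing its own implementation in
 * asm/entry-common.h and defining the name so the #ifndef above is
 * skipped, e.g. a sanity check that the saved regs really are user regs:
 *
 *	static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
 *	{
 *		BUG_ON(!user_mode(regs));
 *	}
 *	#define arch_enter_from_user_mode arch_enter_from_user_mode
 */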

/**
 * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent
 * states.
 *
 * Returns: true if the CPU is potentially in an RCU EQS, false otherwise.
 *
 * Architectures only need to define this if threads other than the idle thread
 * may have an interruptible EQS. This does not need to handle idle threads. It
 * is safe to over-estimate at the cost of redundant RCU management work.
 *
 * Invoked from irqentry_enter()
 */
#ifndef arch_in_rcu_eqs
static __always_inline bool arch_in_rcu_eqs(void) { return false; }
#endif
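
/*
 * Illustrative sketch, not part of this header: an architecture whose
 * non-idle threads can sit in an interruptible EQS would supply its own
 * check via asm/entry-common.h. The in_arch_eqs_wait() helper below is
 * hypothetical; over-estimating here is safe per the comment above:
 *
 *	static __always_inline bool arch_in_rcu_eqs(void)
 *	{
 *		return in_arch_eqs_wait();
 *	}
 *	#define arch_in_rcu_eqs arch_in_rcu_eqs
 */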

/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct and interrupts are still
 * disabled. The subsequent functions can be instrumented.
 *
 * This is invoked when there is architecture specific functionality to be
 * done between establishing state and enabling interrupts. The caller must
 * enable interrupts before invoking syscall_enter_from_user_mode_work().
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	arch_enter_from_user_mode(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(__ct_state() != CT_STATE_USER);
	user_exit_irqoff();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	instrumentation_end();
}
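
/*
 * Illustrative sketch, not part of this header: the intended call pattern
 * from architecture specific syscall entry code. The arch_syscall_entry()
 * wrapper and its shape are made up; syscall_enter_from_user_mode_work()
 * is the real generic helper:
 *
 *	noinstr void arch_syscall_entry(struct pt_regs *regs, long nr)
 *	{
 *		enter_from_user_mode(regs);
 *
 *		instrumentation_begin();
 *		local_irq_enable();
 *		nr = syscall_enter_from_user_mode_work(regs, nr);
 *		instrumentation_end();
 *
 *		(invoke the syscall, then take the exit path)
 *	}
 *
 * Per the comment above, interrupts must be enabled before
 * syscall_enter_from_user_mode_work() is invoked.
 */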

/**
 * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Defaults to local_irq_enable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_enable_exit_to_user(unsigned long ti_work);

#ifndef local_irq_enable_exit_to_user
static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
{
	local_irq_enable();
}
#endif
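
/*
 * Illustrative sketch, not part of this header: an architecture can use
 * the cached flags to pick a cheaper interrupt-enable path. Both the
 * _TIF_ARCH_FAST_RETURN flag and the rationale are hypothetical:
 *
 *	static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
 *	{
 *		if (!(ti_work & _TIF_ARCH_FAST_RETURN))
 *			local_irq_enable();
 *	}
 *	#define local_irq_enable_exit_to_user local_irq_enable_exit_to_user
 */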

/**
 * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
 *
 * Defaults to local_irq_disable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_disable_exit_to_user(void);

#ifndef local_irq_disable_exit_to_user
static inline void local_irq_disable_exit_to_user(void)
{
	local_irq_disable();
}
#endif

/**
 * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
 *				 to user mode.
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_loop() with interrupts enabled
 *
 * Defaults to NOOP. Can be supplied by architecture specific code.
 */
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work);

#ifndef arch_exit_to_user_mode_work
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode_prepare - Architecture specific preparation for
 *				    exit to user mode.
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
 * last function before return. Defaults to NOOP.
 */
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work);

#ifndef arch_exit_to_user_mode_prepare
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode - Architecture specific final work before
 *			    exit to user mode.
 *
 * Invoked from exit_to_user_mode() with interrupts disabled as the last
 * function before return. Defaults to NOOP.
 *
 * This needs to be __always_inline because it is non-instrumentable code
 * invoked after context tracking switched to user mode.
 *
 * An architecture implementation must not do anything complex, no locking
 * etc. The main purpose is for speculation mitigations.
 */
static __always_inline void arch_exit_to_user_mode(void);

#ifndef arch_exit_to_user_mode
static __always_inline void arch_exit_to_user_mode(void) { }
#endif
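
/*
 * Illustrative sketch, not part of this header: the typical use is a last
 * minute speculation mitigation before dropping to user space, kept
 * trivially simple because it runs after context tracking switched to
 * user mode. The mitigation helper below is hypothetical:
 *
 *	static __always_inline void arch_exit_to_user_mode(void)
 *	{
 *		arch_flush_speculation_buffers();
 *	}
 *	#define arch_exit_to_user_mode arch_exit_to_user_mode
 */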

/**
 * arch_do_signal_or_restart - Architecture specific signal delivery function
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from exit_to_user_mode_loop().
 */
void arch_do_signal_or_restart(struct pt_regs *regs);

/**
 * exit_to_user_mode_loop - do any pending work before leaving to user space
 * @regs:	Pointer to pt_regs on entry stack
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Returns: The updated TIF flags after the pending work has been handled.
 */
unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
				     unsigned long ti_work);

/**
 * exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
 * @regs:	Pointer to pt_regs on entry stack
 *
 * 1) check that interrupts are disabled
 * 2) call tick_nohz_user_enter_prepare()
 * 3) call exit_to_user_mode_loop() if any flags from
 *    EXIT_TO_USER_MODE_WORK are set
 * 4) check that interrupts are still disabled
 */
static __always_inline void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work;

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	ti_work = read_thread_flags();
	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that kernel state is sane for a return to userspace */
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.: arch_exit_to_user_mode()
 * 4) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code when syscall_exit_to_user_mode()
 * is not suitable as the last step before returning to userspace. Must be
 * invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke syscall_exit_to_user_mode_work() before this.
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	unwind_reset_info();
	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
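
/*
 * Illustrative sketch, not part of this header: the counterpart to the
 * enter_from_user_mode() pattern above. The arch_irqentry_exit() wrapper
 * name is made up; the body mirrors the generic exit sequence:
 *
 *	noinstr void arch_irqentry_exit(struct pt_regs *regs)
 *	{
 *		instrumentation_begin();
 *		exit_to_user_mode_prepare(regs);
 *		instrumentation_end();
 *		exit_to_user_mode();
 *	}
 *
 * For syscalls the caller additionally has to invoke
 * syscall_exit_to_user_mode_work() in the instrumentable section before
 * exit_to_user_mode().
 */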

/**
 * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from user mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing)
 */
void irqentry_enter_from_user_mode(struct pt_regs *regs);

/**
 * irqentry_exit_to_user_mode - Interrupt exit work
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked with interrupts disabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific interrupt
 * handling code.
 *
 * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
 * Interrupt exit is not invoking #1 which is the syscall specific one time
 * work.
 */
void irqentry_exit_to_user_mode(struct pt_regs *regs);

#ifndef irqentry_state
/**
 * struct irqentry_state - Opaque object for exception state storage
 * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
 *            exit path has to invoke ct_irq_exit().
 * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
 *           lockdep state is restored correctly on exit from nmi.
 *
 * This opaque object is filled in by the irqentry_*_enter() functions and
 * must be passed back into the corresponding irqentry_*_exit() functions
 * when the exception is complete.
 *
 * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
 * and all members private.  Descriptions of the members are provided to aid in
 * the maintenance of the irqentry_*() functions.
 */
typedef struct irqentry_state {
	union {
		bool	exit_rcu;
		bool	lockdep;
	};
} irqentry_state_t;
#endif
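
/*
 * Illustrative sketch, not part of this header: irqentry_enter() and
 * irqentry_exit() bracket an exception handler and the opaque state is
 * simply passed through. Both handler names are made up:
 *
 *	noinstr void arch_handle_exception(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		handle_the_exception(regs);
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);
 *	}
 */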

/**
 * irqentry_enter - Handle state tracking on ordinary interrupt entries
 * @regs:	Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * As a precondition, this requires that the entry came from user mode,
 * idle, or a kernel context in which RCU is watching.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then ct_irq_enter() has to be
 * invoked on entry and ct_irq_exit() on exit.
 *
 * Avoiding the ct_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking ct_irq_enter() without undoing it.
 *
 * For user mode entries irqentry_enter_from_user_mode() is invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: An opaque object that must be passed to irqentry_exit()
 */
irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);

/**
 * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
 *
 * Conditional reschedule with additional sanity checks.
 */
void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled	raw_irqentry_exit_cond_resched
#define irqentry_exit_cond_resched_dynamic_disabled	NULL
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#define irqentry_exit_cond_resched()	static_call(irqentry_exit_cond_resched)()
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void);
#define irqentry_exit_cond_resched()	dynamic_irqentry_exit_cond_resched()
#endif
#else /* CONFIG_PREEMPT_DYNAMIC */
#define irqentry_exit_cond_resched()	raw_irqentry_exit_cond_resched()
#endif /* CONFIG_PREEMPT_DYNAMIC */

/**
 * irqentry_exit - Handle return from exception that used irqentry_enter()
 * @regs:	Pointer to pt_regs (exception entry regs)
 * @state:	Return value from matching call to irqentry_enter()
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to irqentry_enter().
 */
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);

/**
 * irqentry_nmi_enter - Handle NMI entry
 * @regs:	Pointer to current's pt_regs
 *
 * Similar to irqentry_enter() but taking care of the NMI constraints.
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);

/**
 * irqentry_nmi_exit - Handle return from NMI handling
 * @regs:	Pointer to pt_regs (NMI entry regs)
 * @irq_state:	Return value from matching call to irqentry_nmi_enter()
 *
 * Last action before returning to the low level assembly code.
 *
 * Counterpart to irqentry_nmi_enter().
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
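
/*
 * Illustrative sketch, not part of this header: the NMI variants pair up
 * the same way as irqentry_enter()/irqentry_exit(). Both handler names
 * are made up:
 *
 *	noinstr void arch_handle_nmi(struct pt_regs *regs)
 *	{
 *		irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 *
 *		instrumentation_begin();
 *		handle_the_nmi(regs);
 *		instrumentation_end();
 *
 *		irqentry_nmi_exit(regs, irq_state);
 *	}
 */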

#endif