/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_ENTRY_COMMON_H
#define _ASM_X86_ENTRY_COMMON_H

#include <linux/randomize_kstack.h>
#include <linux/user-return-notifier.h>

#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/fpu/api.h>
#include <asm/fred.h>

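/*
 * x86 hooks for the generic entry/exit code. The generic code
 * (include/linux/entry-common.h) substitutes a no-op stub for any hook an
 * architecture leaves undefined; the "#define foo foo" lines below are how
 * x86 signals that it provides its own implementations.
 */
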
/* Check that the stack and regs on entry from user mode are sane. */
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
		/*
		 * Make sure that the entry code gave us a sensible EFLAGS
		 * register. Native because we want to check the actual CPU
		 * state, not the interrupt state as imagined by Xen.
		 */
		unsigned long flags = native_save_fl();
		unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;

		/*
		 * For !SMAP hardware we patch out CLAC on entry.
		 */
		if (cpu_feature_enabled(X86_FEATURE_SMAP) ||
		    cpu_feature_enabled(X86_FEATURE_XENPV))
			mask |= X86_EFLAGS_AC;

		WARN_ON_ONCE(flags & mask);

		/* We think we came from user mode. Make sure pt_regs agrees. */
		WARN_ON_ONCE(!user_mode(regs));

		/*
		 * All entries from user mode (except #DF) should be on the
		 * normal thread stack and should have user pt_regs in the
		 * correct location.
		 */
		WARN_ON_ONCE(!on_thread_stack());
		WARN_ON_ONCE(regs != task_pt_regs(current));
	}
}
#define arch_enter_from_user_mode arch_enter_from_user_mode
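
/*
 * For orientation: the generic entry code calls the check above via
 * enter_from_user_mode(), at the start of every syscall, interrupt and
 * exception arriving from user space, before any other kernel work runs.
 */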

static inline void arch_exit_work(unsigned long ti_work)
{
	/* Fire user-return notifiers; KVM uses these to restore host MSRs. */
	if (ti_work & _TIF_USER_RETURN_NOTIFY)
		fire_user_return_notifiers();

	/* Sync the hardware TSS I/O bitmap with the task's ioperm() state. */
	if (unlikely(ti_work & _TIF_IO_BITMAP))
		tss_update_io_bitmap();

	/* Load the task's FPU registers back into hardware before returning. */
	if (unlikely(ti_work & _TIF_NEED_FPU_LOAD))
		switch_fpu_return();
}

static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
	/* Debug-only check (CONFIG_X86_DEBUG_FPU) that the FPU register
	 * state is consistent with the TIF_NEED_FPU_LOAD bookkeeping. */
	fpregs_assert_state_consistent();

	if (unlikely(ti_work))
		arch_exit_work(ti_work);

	/* Lazily repoint the FRED RSP0 MSR at the current task's stack;
	 * no-op on non-FRED systems. */
	fred_update_rsp0();

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
#endif

	/*
	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
	 * bits. The actual entropy will be further reduced by the compiler
	 * when applying stack alignment constraints (see cc_stack_align4/8 in
	 * arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
	 * low bits from any entropy chosen here.
	 *
	 * Therefore, final stack offset entropy will be 7 (x86_64) or
	 * 8 (ia32) bits.
	 */
	choose_random_kstack_offset(rdtsc());
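
	/*
	 * Worked example of the above on x86_64: the 10-bit cap allows
	 * offsets of roughly 0..1023 bytes; 8-byte stack alignment then
	 * drops the low 3 bits, so the next syscall entry picks one of
	 * 2^7 = 128 distinct offsets.
	 */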

	/*
	 * Flush indirect branch predictor state if something (e.g. KVM
	 * after running a guest) requested it. Check the feature bit
	 * first to avoid unnecessary reads of 'x86_ibpb_exit_to_user'.
	 */
	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
	    this_cpu_read(x86_ibpb_exit_to_user)) {
		indirect_branch_prediction_barrier();
		this_cpu_write(x86_ibpb_exit_to_user, false);
	}
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

static __always_inline void arch_exit_to_user_mode(void)
{
	/* Flush stale divider state (AMD DIV0 erratum); alternatives patch
	 * this to a no-op on unaffected CPUs. */
	amd_clear_divider();
}
#define arch_exit_to_user_mode arch_exit_to_user_mode
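
/*
 * For orientation: on the return-to-user path the generic entry code
 * (kernel/entry/common.c) invokes the hooks above roughly as follows:
 *
 *	exit_to_user_mode_prepare()
 *		exit_to_user_mode_loop()		<- signals, resched, ...
 *		arch_exit_to_user_mode_prepare()	<- this header
 *	exit_to_user_mode()
 *		arch_exit_to_user_mode()		<- this header
 */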

#endif /* _ASM_X86_ENTRY_COMMON_H */
