/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/ptrace-abi.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities.) rflags is
   clobbered. Leftover arguments are passed on the stack frame. )

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] For struct return values wider than 64 bits the return convention is a
      bit more complex: structures up to 128 bits wide are returned directly
      in rax, rdx. For anything larger (3 words or more) the caller puts a
      pointer to an on-stack return struct [allocated in the caller's stack
      frame] into the first argument - i.e. into rdi. All other arguments
      shift up by one in this case. Fortunately this case is rare in the
      kernel.

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack frame. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

*/
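
/*
 * Illustrative example (hypothetical prototype): under the 64-bit
 * convention above, a call such as
 *
 *	long f(long a, long b, long c, long d, long e, long f, long g);
 *
 * receives a..f in rdi, rsi, rdx, rcx, r8, r9, takes g from the stack,
 * and returns the result in rax.
 */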

#ifdef CONFIG_X86_64

/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

.macro PUSH_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0 unwind_hint=1
	.if \save_ret
	pushq	%rsi		/* pt_regs->si */
	movq	8(%rsp), %rsi	/* temporarily store the return address in %rsi */
	movq	%rdi, 8(%rsp)	/* pt_regs->di (overwriting original return address) */
	/* We just clobbered the return address - use the IRET frame for unwinding: */
	UNWIND_HINT_IRET_REGS offset=3*8
	.else
	pushq	%rdi		/* pt_regs->di */
	pushq	%rsi		/* pt_regs->si */
	.endif
	pushq	\rdx		/* pt_regs->dx */
	pushq	\rcx		/* pt_regs->cx */
	pushq	\rax		/* pt_regs->ax */
	pushq	%r8		/* pt_regs->r8 */
	pushq	%r9		/* pt_regs->r9 */
	pushq	%r10		/* pt_regs->r10 */
	pushq	%r11		/* pt_regs->r11 */
	pushq	%rbx		/* pt_regs->bx */
	pushq	%rbp		/* pt_regs->bp */
	pushq	%r12		/* pt_regs->r12 */
	pushq	%r13		/* pt_regs->r13 */
	pushq	%r14		/* pt_regs->r14 */
	pushq	%r15		/* pt_regs->r15 */

	.if \unwind_hint
	UNWIND_HINT_REGS
	.endif

	.if \save_ret
	pushq	%rsi		/* return address on top of stack */
	.endif
.endm
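
/*
 * Usage sketch (hypothetical entry path, not taken from real entry code):
 * once the hardware exception frame and orig_ax have been pushed, PUSH_REGS
 * completes the pt_regs layout:
 *
 *	pushq	$-1			(pt_regs->orig_ax, placeholder value)
 *	PUSH_REGS
 *
 * leaving %rsp pointing at a complete struct pt_regs.
 */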

.macro CLEAR_REGS clear_callee=1
	/*
	 * Sanitize registers of values that a speculation attack might
	 * otherwise want to exploit. The lower registers are likely clobbered
	 * well before they could be put to use in a speculative execution
	 * gadget.
	 */
	xorl	%esi,  %esi	/* nospec si  */
	xorl	%edx,  %edx	/* nospec dx  */
	xorl	%ecx,  %ecx	/* nospec cx  */
	xorl	%r8d,  %r8d	/* nospec r8  */
	xorl	%r9d,  %r9d	/* nospec r9  */
	xorl	%r10d, %r10d	/* nospec r10 */
	xorl	%r11d, %r11d	/* nospec r11 */
	.if \clear_callee
	xorl	%ebx,  %ebx	/* nospec bx  */
	xorl	%ebp,  %ebp	/* nospec bp  */
	xorl	%r12d, %r12d	/* nospec r12 */
	xorl	%r13d, %r13d	/* nospec r13 */
	xorl	%r14d, %r14d	/* nospec r14 */
	xorl	%r15d, %r15d	/* nospec r15 */
	.endif
.endm

.macro PUSH_AND_CLEAR_REGS rdx=%rdx rcx=%rcx rax=%rax save_ret=0 clear_callee=1 unwind_hint=1
	PUSH_REGS rdx=\rdx, rcx=\rcx, rax=\rax, save_ret=\save_ret, unwind_hint=\unwind_hint
	CLEAR_REGS clear_callee=\clear_callee
.endm

.macro POP_REGS pop_rdi=1
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbp
	popq	%rbx
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi
	.if \pop_rdi
	popq	%rdi
	.endif
.endm
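
/*
 * Pairing sketch (hypothetical handler name): PUSH_AND_CLEAR_REGS and
 * POP_REGS are intended to bracket a call into C code:
 *
 *	PUSH_AND_CLEAR_REGS
 *	movq	%rsp, %rdi		(arg1: pt_regs pointer)
 *	call	some_c_handler
 *	POP_REGS
 */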

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION

/*
 * MITIGATION_PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two
 * halves:
 */
#define PTI_USER_PGTABLE_BIT		PAGE_SHIFT
#define PTI_USER_PGTABLE_MASK		(1 << PTI_USER_PGTABLE_BIT)
#define PTI_USER_PCID_BIT		X86_CR3_PTI_PCID_USER_BIT
#define PTI_USER_PCID_MASK		(1 << PTI_USER_PCID_BIT)
#define PTI_USER_PGTABLE_AND_PCID_MASK  (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK)
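
/*
 * Worked example, assuming 4K pages (PAGE_SHIFT == 12) and
 * X86_CR3_PTI_PCID_USER_BIT == 11:
 *
 *	PTI_USER_PGTABLE_MASK          == 1 << 12 == 0x1000
 *	PTI_USER_PCID_MASK             == 1 << 11 == 0x0800
 *	PTI_USER_PGTABLE_AND_PCID_MASK ==            0x1800
 *
 * i.e. a kernel CR3 and its user counterpart differ only in bits 11-12.
 */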

.macro SET_NOFLUSH_BIT	reg:req
	bts	$X86_CR3_PCID_NOFLUSH_BIT, \reg
.endm

.macro ADJUST_KERNEL_CR3 reg:req
	ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID
	/* Clear PCID and "MITIGATION_PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq	$(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg
.endm

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	mov	%cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov	\scratch_reg, %cr3
.Lend_\@:
.endm
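
/*
 * Worked example (illustrative CR3 value): for a user CR3 of 0x26081801
 * (PGD base 0x26080000 with the user-half bit 12 set, user PCID 0x801),
 * ADJUST_KERNEL_CR3 clears bits 11 and 12, giving 0x26080001: the kernel
 * half of the PGD with kernel PCID 1, plus the NOFLUSH bit (bit 63) when
 * X86_FEATURE_PCID is available.
 */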

#define THIS_CPU_user_pcid_flush_mask   \
	PER_CPU_VAR(cpu_tlbstate + TLB_STATE_user_pcid_flush_mask)

.macro SWITCH_TO_USER_CR3 scratch_reg:req scratch_reg2:req
	mov	%cr3, \scratch_reg

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Test if the ASID needs a flush.
	 */
	movq	\scratch_reg, \scratch_reg2
	andq	$(0x7FF), \scratch_reg		/* mask ASID */
	bt	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jnc	.Lnoflush_\@

	/* Flush needed, clear the bit */
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	movq	\scratch_reg2, \scratch_reg
	jmp	.Lwrcr3_pcid_\@

.Lnoflush_\@:
	movq	\scratch_reg2, \scratch_reg
	SET_NOFLUSH_BIT \scratch_reg

.Lwrcr3_pcid_\@:
	/* Flip the ASID to the user version */
	orq	$(PTI_USER_PCID_MASK), \scratch_reg

.Lwrcr3_\@:
	/* Flip the PGD to the user version */
	orq	$(PTI_USER_PGTABLE_MASK), \scratch_reg
	mov	\scratch_reg, %cr3
.endm
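
/*
 * Worked example (illustrative ASID): if CR3 carries ASID 3, the code
 * above tests bit 3 of this CPU's user_pcid_flush_mask. If the bit is
 * set, a flush is pending: btr clears it and CR3 is written without the
 * NOFLUSH bit, so the stale user-PCID TLB entries are flushed. If it is
 * clear, bit 63 is set and the TLB contents for that PCID survive the
 * CR3 write.
 */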

.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	SWITCH_TO_USER_CR3 \scratch_reg \scratch_reg2
.Lend_\@:
.endm

.macro SWITCH_TO_USER_CR3_STACK	scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	pushq	%rax
	SWITCH_TO_USER_CR3 scratch_reg=\scratch_reg scratch_reg2=%rax
	popq	%rax
.Lend_\@:
.endm

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
	movq	%cr3, \scratch_reg
	movq	\scratch_reg, \save_reg
	/*
	 * Test the user pagetable bit. If set, then the user page tables
	 * are active. If clear, CR3 already has the kernel page tables
	 * active.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \scratch_reg
	jnc	.Ldone_\@

	ADJUST_KERNEL_CR3 \scratch_reg
	movq	\scratch_reg, %cr3

.Ldone_\@:
.endm

/* Restore CR3 from a kernel context. May restore a user CR3 value. */
.macro PARANOID_RESTORE_CR3 scratch_reg:req save_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	/*
	 * If CR3 contained the kernel page tables at the paranoid exception
	 * entry, then there is nothing to restore as CR3 is not modified while
	 * handling the exception.
	 */
	bt	$PTI_USER_PGTABLE_BIT, \save_reg
	jnc	.Lend_\@

	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID

	/*
	 * Check if there's a pending flush for the user ASID we're
	 * about to set.
	 */
	movq	\save_reg, \scratch_reg
	andq	$(0x7FF), \scratch_reg
	btr	\scratch_reg, THIS_CPU_user_pcid_flush_mask
	jc	.Lwrcr3_\@

	SET_NOFLUSH_BIT \save_reg

.Lwrcr3_\@:
	movq	\save_reg, %cr3
.Lend_\@:
.endm
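
/*
 * Pairing sketch (hypothetical register choice): paranoid entry code is
 * expected to bracket the exception handling like this:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...handle the exception...
 *	PARANOID_RESTORE_CR3 scratch_reg=%rax save_reg=%r14
 */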

#else /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.endm
.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
.endm
.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req
.endm
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.endm
.macro PARANOID_RESTORE_CR3 scratch_reg:req save_reg:req
.endm

#endif

/*
 * IBRS kernel mitigation for Spectre_v2.
 *
 * Assumes full context is established (PUSH_REGS, CR3 and GS) and clobbers
 * the registers it uses (AX, CX, DX). Must be called before the first RET
 * instruction (NOTE! UNTRAIN_RET includes a RET instruction).
 *
 * The optional argument is used to save/restore the current value,
 * which is used on the paranoid paths.
 *
 * Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
 */
.macro IBRS_ENTER save_reg
#ifdef CONFIG_MITIGATION_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	rdmsr
	shl	$32, %rdx
	or	%rdx, %rax
	mov	%rax, \save_reg
	test	$SPEC_CTRL_IBRS, %eax
	jz	.Ldo_wrmsr_\@
	lfence
	jmp	.Lend_\@
.Ldo_wrmsr_\@:
.endif

	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm
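
/*
 * Worked example (illustrative value): rdmsr/wrmsr move 64-bit MSR values
 * split across edx:eax. If x86_spec_ctrl_current is 0x1 (SPEC_CTRL_IBRS),
 * the tail of IBRS_ENTER loads rdx = 0x1, copies the low half to eax,
 * shifts rdx right by 32 to get the high half (0x0), and wrmsr then
 * writes edx:eax to MSR_IA32_SPEC_CTRL.
 */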

/*
 * Similar to IBRS_ENTER: requires kernel GS and CR3, and clobbers the
 * AX, CX and DX registers. Must be called after the last RET.
 */
.macro IBRS_EXIT save_reg
#ifdef CONFIG_MITIGATION_IBRS_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
	movl	$MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
	mov	\save_reg, %rdx
.else
	movq	PER_CPU_VAR(x86_spec_ctrl_current), %rdx
	andl	$(~SPEC_CTRL_IBRS), %edx
.endif

	movl	%edx, %eax
	shr	$32, %rdx
	wrmsr
.Lend_\@:
#endif
.endm

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm
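
/*
 * Usage sketch (illustrative, modeled on a conditional-swapgs entry path):
 * both branches get fenced, so neither the taken nor the skipped swapgs
 * can be speculated past:
 *
 *	testb	$3, CS(%rsp)		(did we come from user space?)
 *	jz	1f
 *	swapgs
 *	FENCE_SWAPGS_USER_ENTRY
 *	jmp	2f
 * 1:	FENCE_SWAPGS_KERNEL_ENTRY
 * 2:
 */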

.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_KSTACK_ERASE
	PUSH_AND_CLEAR_REGS
	call	stackleak_erase
	POP_REGS
#endif
.endm

.macro SAVE_AND_SET_GSBASE scratch_reg:req save_reg:req
	rdgsbase \save_reg
	GET_PERCPU_BASE \scratch_reg
	wrgsbase \scratch_reg
.endm
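
/*
 * Pairing note (a sketch, assuming FSGSBASE is in use): the matching exit
 * path is expected to restore the saved GSBASE with
 *
 *	wrgsbase %r15			(whatever register was passed as save_reg)
 *
 * mirroring the rdgsbase done here on entry.
 */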

#else /* !CONFIG_X86_64 */
# undef		UNWIND_HINT_IRET_REGS
# define	UNWIND_HINT_IRET_REGS
#endif /* CONFIG_X86_64 */

.macro STACKLEAK_ERASE
#ifdef CONFIG_KSTACK_ERASE
	call	stackleak_erase
#endif
.endm

#ifdef CONFIG_SMP

/*
 * The CPU/node NR is loaded from the limit (size) field of a special
 * segment descriptor entry in the GDT.
 */
.macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
	movq	$__CPUNODE_SEG, \reg
	lsl	\reg, \reg
.endm
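
/*
 * Worked example (assuming the vdso encoding of node << 12 | cpu in the
 * segment limit): on CPU 5 of node 1, lsl yields (1 << 12) | 5 == 0x1005.
 * GET_PERCPU_BASE below masks this with VDSO_CPUNODE_MASK to recover the
 * CPU number 5 and index __per_cpu_offset with it.
 */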

/*
 * Fetch the per-CPU GSBASE value for this processor and put it in @reg.
 * We normally use %gs for accessing per-CPU data, but we are setting up
 * %gs here and obviously cannot use %gs itself to access per-CPU data.
 *
 * Do not use RDPID, because KVM loads the guest's TSC_AUX on vm-entry and
 * may not restore the host's value until the CPU returns to userspace.
 * Thus the kernel would consume a guest's TSC_AUX if an NMI arrives
 * while running KVM's run loop.
 */
.macro GET_PERCPU_BASE reg:req
	LOAD_CPU_AND_NODE_SEG_LIMIT \reg
	andq	$VDSO_CPUNODE_MASK, \reg
	movq	__per_cpu_offset(, \reg, 8), \reg
.endm

#else

.macro GET_PERCPU_BASE reg:req
	movq	pcpu_unit_offsets(%rip), \reg
.endm

#endif /* CONFIG_SMP */

#ifdef CONFIG_X86_64

/* rdi:	arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func
SYM_FUNC_START(\name)
	ANNOTATE_NOENDBR
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rdi
	pushq	%rsi
	pushq	%rdx
	pushq	%rcx
	pushq	%rax
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11

	call	\func

	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi
	popq	%rdi
	popq	%rbp
	RET
SYM_FUNC_END(\name)
_ASM_NOKPROBE(\name)
.endm
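
/*
 * Usage example (as in the kernel's thunk wrappers):
 *
 *	THUNK preempt_schedule_thunk, preempt_schedule
 *
 * emits preempt_schedule_thunk, which preserves all caller-saved registers
 * around the call to preempt_schedule.
 */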

#else /* CONFIG_X86_32 */

/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
SYM_CODE_START_NOALIGN(\name)
	pushl	%eax
	pushl	%ecx
	pushl	%edx

	.if \put_ret_addr_in_eax
	/* Place the return address (EIP) in arg1 */
	movl	3*4(%esp), %eax
	.endif

	call	\func
	popl	%edx
	popl	%ecx
	popl	%eax
	RET
_ASM_NOKPROBE(\name)
SYM_CODE_END(\name)
.endm

#endif