// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>

#ifdef CONFIG_X86_32
#define PERF_REG_X86_MAX PERF_REG_X86_32_MAX
#else
#define PERF_REG_X86_MAX PERF_REG_X86_64_MAX
#endif

#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)

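/*
 * Map each PERF_REG_X86_* index to the offset of the matching field in
 * struct pt_regs, so a sampled register can be looked up by index.
 */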
static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
	PT_REGS_OFFSET(PERF_REG_X86_AX, ax),
	PT_REGS_OFFSET(PERF_REG_X86_BX, bx),
	PT_REGS_OFFSET(PERF_REG_X86_CX, cx),
	PT_REGS_OFFSET(PERF_REG_X86_DX, dx),
	PT_REGS_OFFSET(PERF_REG_X86_SI, si),
	PT_REGS_OFFSET(PERF_REG_X86_DI, di),
	PT_REGS_OFFSET(PERF_REG_X86_BP, bp),
	PT_REGS_OFFSET(PERF_REG_X86_SP, sp),
	PT_REGS_OFFSET(PERF_REG_X86_IP, ip),
	PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags),
	PT_REGS_OFFSET(PERF_REG_X86_CS, cs),
	PT_REGS_OFFSET(PERF_REG_X86_SS, ss),
#ifdef CONFIG_X86_32
	PT_REGS_OFFSET(PERF_REG_X86_DS, ds),
	PT_REGS_OFFSET(PERF_REG_X86_ES, es),
	PT_REGS_OFFSET(PERF_REG_X86_FS, fs),
	PT_REGS_OFFSET(PERF_REG_X86_GS, gs),
#else
	/*
	 * The pt_regs struct does not store
	 * ds, es, fs, gs in 64 bit mode.
	 */
	(unsigned int) -1,
	(unsigned int) -1,
	(unsigned int) -1,
	(unsigned int) -1,
#endif
#ifdef CONFIG_X86_64
	PT_REGS_OFFSET(PERF_REG_X86_R8, r8),
	PT_REGS_OFFSET(PERF_REG_X86_R9, r9),
	PT_REGS_OFFSET(PERF_REG_X86_R10, r10),
	PT_REGS_OFFSET(PERF_REG_X86_R11, r11),
	PT_REGS_OFFSET(PERF_REG_X86_R12, r12),
	PT_REGS_OFFSET(PERF_REG_X86_R13, r13),
	PT_REGS_OFFSET(PERF_REG_X86_R14, r14),
	PT_REGS_OFFSET(PERF_REG_X86_R15, r15),
#endif
};

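/*
 * Fetch one register for a sample.  XMM registers are read from the
 * xmm_regs array attached to struct x86_perf_regs (when the PMU driver
 * provided one); the integer registers come out of pt_regs via the
 * offset table above.  Unavailable registers read back as 0.
 */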
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	struct x86_perf_regs *perf_regs;

	if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
		perf_regs = container_of(regs, struct x86_perf_regs, regs);
		if (!perf_regs->xmm_regs)
			return 0;
		return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
	}

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
		return 0;

	return regs_get_register(regs, pt_regs_offset[idx]);
}

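/*
 * Bits from PERF_REG_X86_MAX up to (but not including) PERF_REG_X86_XMM0
 * are reserved: the first term selects everything below the XMM range,
 * the second clears the architecturally valid registers.
 */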
#define PERF_REG_X86_RESERVED	(((1ULL << PERF_REG_X86_XMM0) - 1) & \
				 ~((1ULL << PERF_REG_X86_MAX) - 1))

#ifdef CONFIG_X86_32
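/* R8..R15 do not exist on 32-bit x86, so reject any request for them. */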
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
		       (1ULL << PERF_REG_X86_R9) | \
		       (1ULL << PERF_REG_X86_R10) | \
		       (1ULL << PERF_REG_X86_R11) | \
		       (1ULL << PERF_REG_X86_R12) | \
		       (1ULL << PERF_REG_X86_R13) | \
		       (1ULL << PERF_REG_X86_R14) | \
		       (1ULL << PERF_REG_X86_R15))

int perf_reg_validate(u64 mask)
{
	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
		return -EINVAL;

	return 0;
}

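/* A 32-bit kernel only ever runs 32-bit tasks. */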
u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}

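/*
 * The registers saved at kernel entry (task_pt_regs) are used directly
 * as the user-space register snapshot.
 */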
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
#else /* CONFIG_X86_64 */
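/* ds, es, fs and gs are not saved in pt_regs on 64-bit kernels. */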
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
		       (1ULL << PERF_REG_X86_ES) | \
		       (1ULL << PERF_REG_X86_FS) | \
		       (1ULL << PERF_REG_X86_GS))

int perf_reg_validate(u64 mask)
{
	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
		return -EINVAL;

	return 0;
}

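/*
 * The mode recorded in the task's saved user registers decides whether
 * samples are reported with the 64-bit or the 32-bit (compat) ABI.
 */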
u64 perf_reg_abi(struct task_struct *task)
{
	if (!user_64bit_mode(task_pt_regs(task)))
		return PERF_SAMPLE_REGS_ABI_32;
	else
		return PERF_SAMPLE_REGS_ABI_64;
}

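/*
 * Per-CPU scratch pt_regs used to assemble a partial user register
 * snapshot when a sample is taken from NMI context.
 */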
static DEFINE_PER_CPU(struct pt_regs, nmi_user_regs);

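/*
 * Outside of NMI context the saved user registers are used as-is.  From
 * an NMI only the registers the entry code is known to have saved are
 * copied into the per-CPU scratch area; bx and r12..r15 are reported
 * as -1.
 */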
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs)
{
	struct pt_regs *regs_user_copy = this_cpu_ptr(&nmi_user_regs);
	struct pt_regs *user_regs = task_pt_regs(current);

	if (!in_nmi()) {
		regs_user->regs = user_regs;
		regs_user->abi = perf_reg_abi(current);
		return;
	}

	/*
	 * If we're in an NMI that interrupted task_pt_regs setup, then
	 * we can't sample user regs at all.  This check isn't really
	 * sufficient, though, as we could be in an NMI inside an interrupt
	 * that happened during task_pt_regs setup.
	 */
	if (regs->sp > (unsigned long)&user_regs->r11 &&
	    regs->sp <= (unsigned long)(user_regs + 1)) {
		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
		regs_user->regs = NULL;
		return;
	}

	/*
	 * These registers are always saved on 64-bit syscall entry.
	 * On 32-bit entry points, they are saved too except r8..r11.
	 */
	regs_user_copy->ip = user_regs->ip;
	regs_user_copy->ax = user_regs->ax;
	regs_user_copy->cx = user_regs->cx;
	regs_user_copy->dx = user_regs->dx;
	regs_user_copy->si = user_regs->si;
	regs_user_copy->di = user_regs->di;
	regs_user_copy->r8 = user_regs->r8;
	regs_user_copy->r9 = user_regs->r9;
	regs_user_copy->r10 = user_regs->r10;
	regs_user_copy->r11 = user_regs->r11;
	regs_user_copy->orig_ax = user_regs->orig_ax;
	regs_user_copy->flags = user_regs->flags;
	regs_user_copy->sp = user_regs->sp;
	regs_user_copy->cs = user_regs->cs;
	regs_user_copy->ss = user_regs->ss;
	/*
	 * Store user space frame-pointer value on sample
	 * to facilitate stack unwinding for cases when
	 * user space executable code has such support
	 * enabled at compile time:
	 */
	regs_user_copy->bp = user_regs->bp;

	regs_user_copy->bx = -1;
	regs_user_copy->r12 = -1;
	regs_user_copy->r13 = -1;
	regs_user_copy->r14 = -1;
	regs_user_copy->r15 = -1;
	/*
	 * For this to be at all useful, we need a reasonable guess for
	 * the ABI.  Be careful: we're in NMI context, and we're
	 * considering current to be the current task, so we should
	 * be careful not to look at any other percpu variables that might
	 * change during context switches.
	 */
	regs_user->abi = user_64bit_mode(user_regs) ?
			 PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;

	regs_user->regs = regs_user_copy;
}
#endif /* CONFIG_X86_32 */