/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
 * Raise it if needed.
 */
#define TEXT_POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

extern void text_poke_apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void smp_text_poke_sync_each_cpu(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
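/*
 * The self-referential define below is (presumably) there so that generic
 * code can detect this arch-provided helper with #ifdef text_poke_copy;
 * to the compiler itself the define is a no-op.
 */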
#define text_poke_copy text_poke_copy
extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, bool core_ok);
extern void *text_poke_set(void *addr, int c, size_t len);
extern int smp_text_poke_int3_handler(struct pt_regs *regs);
extern void smp_text_poke_single(void *addr, const void *opcode, size_t len, const void *emulate);

extern void smp_text_poke_batch_add(void *addr, const void *opcode, size_t len, const void *emulate);
extern void smp_text_poke_batch_finish(void);
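
/*
 * Usage sketch (hypothetical caller, not part of this header's API docs):
 * queue several rewrites with __text_gen_insn() from below, then let one
 * INT3-based transition install them all. 'site1'/'site2' and
 * 'target1'/'target2' are made-up kernel text addresses.
 *
 *	u8 jmp[JMP32_INSN_SIZE];
 *
 *	__text_gen_insn(jmp, JMP32_INSN_OPCODE, site1, target1, JMP32_INSN_SIZE);
 *	smp_text_poke_batch_add(site1, jmp, JMP32_INSN_SIZE, NULL);
 *
 *	__text_gen_insn(jmp, JMP32_INSN_OPCODE, site2, target2, JMP32_INSN_SIZE);
 *	smp_text_poke_batch_add(site2, jmp, JMP32_INSN_SIZE, NULL);
 *
 *	smp_text_poke_batch_finish();
 */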

#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define RET_INSN_SIZE		1
#define RET_INSN_OPCODE		0xC3

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

#define DISP32_SIZE		4
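
/*
 * Layout note: the 5-byte CALL/JMP32 forms are one opcode byte followed
 * by a DISP32_SIZE (4-byte) displacement, signed and relative to the end
 * of the instruction; e.g. E8 xx xx xx xx is CALL rel32.
 */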

static __always_inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn)	\
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch(opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}

union text_poke_insn {
	u8 text[TEXT_POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};
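
/*
 * Example of the union's layout: a JMP32 at @addr targeting @dest has
 * text[0] == opcode == 0xE9 and disp == (s32)(dest - (addr + JMP32_INSN_SIZE)),
 * so text[] holds the 5 raw instruction bytes.
 */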

static __always_inline
void __text_gen_insn(void *buf, u8 opcode, const void *addr, const void *dest, int size)
{
	union text_poke_insn *insn = buf;

	BUG_ON(size < text_opcode_size(opcode));

	/*
	 * Hide the addresses to avoid the compiler folding in constants when
	 * referencing code; these can mess up annotations like
	 * ANNOTATE_NOENDBR.
	 */
	OPTIMIZER_HIDE_VAR(insn);
	OPTIMIZER_HIDE_VAR(addr);
	OPTIMIZER_HIDE_VAR(dest);

	insn->opcode = opcode;

	if (size > 1) {
		insn->disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
			BUG_ON((insn->disp >> 31) != (insn->disp >> 7));
		}
	}
}

static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */

	__text_gen_insn(&insn, opcode, addr, dest, text_opcode_size(opcode));

	return &insn.text;
}
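
/*
 * Usage sketch (hypothetical, for illustration only): emit a 5-byte CALL
 * from a patch site to a target and install it; 'site' and 'target' stand
 * in for real kernel text addresses.
 *
 *	void *call = text_gen_insn(CALL_INSN_OPCODE, site, target);
 *
 *	text_poke(site, call, CALL_INSN_SIZE);
 *
 * Since the backing buffer is a static local in an __always_inline
 * function, each call site gets its own copy ("per instance"), but the
 * bytes must still be consumed before that site generates again.
 */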

extern int after_bootmem;
extern __ro_after_init struct mm_struct *text_poke_mm;
extern __ro_after_init unsigned long text_poke_mm_addr;

#ifndef CONFIG_UML_X86
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The INT3 handler in entry_64.S adds a gap between the
	 * stack where the break point happened, and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's X86_TRAP_BP logic.
	 *
	 * Similarly, entry_32.S will have a gap on the stack for
	 * (any) hardware exception and pt_regs; see the
	 * FIXUP_FRAME macro.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}
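
/*
 * Worked example: an INT3 overwrites the first byte of a 5-byte CALL at
 * address A, so the trap leaves regs->ip == A + INT3_INSN_SIZE. The
 * return address the original CALL would have pushed is A + CALL_INSN_SIZE,
 * i.e. regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE, which is what
 * int3_emulate_call() pushes before jumping to func.
 */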

static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
	unsigned long ip = int3_emulate_pop(regs);
	int3_emulate_jmp(regs, ip);
}

static __always_inline
bool __emulate_cc(unsigned long flags, u8 cc)
{
	static const unsigned long cc_mask[6] = {
		[0] = X86_EFLAGS_OF,
		[1] = X86_EFLAGS_CF,
		[2] = X86_EFLAGS_ZF,
		[3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
		[4] = X86_EFLAGS_SF,
		[5] = X86_EFLAGS_PF,
	};

	bool invert = cc & 1;
	bool match;

	if (cc < 0xc) {
		match = flags & cc_mask[cc >> 1];
	} else {
		match = ((flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
			((flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
		if (cc >= 0xe)
			match = match || (flags & X86_EFLAGS_ZF);
	}

	return (match && !invert) || (!match && invert);
}
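
/*
 * Decoding example: @cc is the low nibble of the Jcc opcode. cc == 0x4
 * (JE/JZ, opcode 0x74) tests ZF via cc_mask[2]; cc == 0x5 (JNE/JNZ) is
 * the same test with the invert bit set; cc == 0xc (JL) compares SF
 * against OF; cc >= 0xe (JLE/JG) additionally ORs in ZF.
 */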

static __always_inline
void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp)
{
	if (__emulate_cc(regs->flags, cc))
		ip += disp;

	int3_emulate_jmp(regs, ip);
}

#endif /* !CONFIG_UML_X86 */

#endif /* _ASM_X86_TEXT_PATCHING_H */