// SPDX-License-Identifier: GPL-2.0
/*
 * FPU register's regset abstraction, for ptrace, core dumps, etc.
 */
#include <linux/sched/task_stack.h>
#include <linux/vmalloc.h>

#include <asm/fpu/api.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/prctl.h>

#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"

|---|
/*
 * The xstateregs_active() routine is the same as the regset_fpregs_active()
 * routine, as the "regset->n" for the xstate regset will be updated based
 * on the feature capabilities supported by XSAVE.
 */
int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return regset->n;
}

int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	if (boot_cpu_has(X86_FEATURE_FXSR))
		return regset->n;
	else
		return 0;
}
|---|

/*
 * The regset get() functions are invoked from:
 *
 *   - coredump to dump the current task's fpstate. If the current task
 *     owns the FPU then the memory state has to be synchronized and the
 *     FPU register state preserved. Otherwise fpstate is already in sync.
 *
 *   - ptrace to dump fpstate of a stopped task, in which case the registers
 *     have already been saved to fpstate on context switch.
 */
static void sync_fpstate(struct fpu *fpu)
{
	if (fpu == x86_task_fpu(current))
		fpu_sync_fpstate(fpu);
}

/*
 * Invalidate cached FPU registers before modifying the stopped target
 * task's fpstate.
 *
 * This forces the target task on resume to restore the FPU registers from
 * the modified fpstate. Otherwise the task might skip the restore and
 * operate with the cached FPU registers, which would discard the
 * modifications.
 */
static void fpu_force_restore(struct fpu *fpu)
{
	/*
	 * Only stopped child tasks can be used to modify the FPU
	 * state in the fpstate buffer:
	 */
	WARN_ON_FPU(fpu == x86_task_fpu(current));

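	/*
	 * In the current implementation this invalidates fpu->last_cpu,
	 * so the register-cache check fails on the next switch to the
	 * target task and the registers are reloaded from fpstate.
	 */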
|---|
	__fpu_invalidate_fpregs_state(fpu);
}
|---|

int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		struct membuf to)
{
	struct fpu *fpu = x86_task_fpu(target);

	if (!cpu_feature_enabled(X86_FEATURE_FXSR))
		return -ENODEV;

	sync_fpstate(fpu);

	if (!use_xsave()) {
		return membuf_write(&to, &fpu->fpstate->regs.fxsave,
				    sizeof(fpu->fpstate->regs.fxsave));
	}

	copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_FX);
	return 0;
}

int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = x86_task_fpu(target);
	struct fxregs_state newstate;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_FXSR))
		return -ENODEV;

	/* No funny business with partial or oversized writes is permitted. */
	if (pos != 0 || count != sizeof(newstate))
		return -EINVAL;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
	if (ret)
		return ret;

	/* Do not allow an invalid MXCSR value. */
	if (newstate.mxcsr & ~mxcsr_feature_mask)
		return -EINVAL;

	fpu_force_restore(fpu);

	/* Copy the state */
	memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate));

	/* Clear xmm8..15 for 32-bit callers */
	BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16);
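	/*
	 * xmm_space[] is an array of u32s, four dwords per XMM register
	 * (the BUILD_BUG_ON above checks for 16 registers * 16 bytes), so
	 * xmm8 starts at dword index 8*4 and the upper eight registers
	 * span 8 * 16 bytes.
	 */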
|---|
	if (in_ia32_syscall())
		memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16);

	/* Mark FP and SSE as in use when XSAVE is enabled */
	if (use_xsave())
		fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;

	return 0;
}
|---|

int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
		return -ENODEV;

	sync_fpstate(x86_task_fpu(target));

	copy_xstate_to_uabi_buf(to, target, XSTATE_COPY_XSAVE);
	return 0;
}

int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = x86_task_fpu(target);
	struct xregs_state *tmpbuf = NULL;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_XSAVE))
		return -ENODEV;

	/*
	 * A whole standard-format XSAVE buffer is needed:
	 */
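	/* fpu_user_cfg.max_size is the size of the full, non-compacted UABI XSAVE buffer. */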
|---|
	if (pos != 0 || count != fpu_user_cfg.max_size)
		return -EFAULT;

	if (!kbuf) {
		tmpbuf = vmalloc(count);
		if (!tmpbuf)
			return -ENOMEM;

		if (copy_from_user(tmpbuf, ubuf, count)) {
			ret = -EFAULT;
			goto out;
		}
	}

	fpu_force_restore(fpu);
	ret = copy_uabi_from_kernel_to_xstate(fpu->fpstate, kbuf ?: tmpbuf, &target->thread.pkru);

out:
	vfree(tmpbuf);
	return ret;
}
|---|

#ifdef CONFIG_X86_USER_SHADOW_STACK
int ssp_active(struct task_struct *target, const struct user_regset *regset)
{
	if (target->thread.features & ARCH_SHSTK_SHSTK)
		return regset->n;

	return 0;
}

int ssp_get(struct task_struct *target, const struct user_regset *regset,
	    struct membuf to)
{
	struct fpu *fpu = x86_task_fpu(target);
	struct cet_user_state *cetregs;

	if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
	    !ssp_active(target, regset))
		return -ENODEV;

	sync_fpstate(fpu);
	cetregs = get_xsave_addr(&fpu->fpstate->regs.xsave, XFEATURE_CET_USER);
	if (WARN_ON(!cetregs)) {
		/*
		 * This shouldn't ever be NULL because shadow stack was
		 * verified to be enabled above. This means
		 * MSR_IA32_U_CET.CET_SHSTK_EN should be 1 and so
		 * XFEATURE_CET_USER should not be in the init state.
		 */
		return -ENODEV;
	}

	return membuf_write(&to, (unsigned long *)&cetregs->user_ssp,
			    sizeof(cetregs->user_ssp));
}

int ssp_set(struct task_struct *target, const struct user_regset *regset,
	    unsigned int pos, unsigned int count,
	    const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = x86_task_fpu(target);
	struct xregs_state *xsave = &fpu->fpstate->regs.xsave;
	struct cet_user_state *cetregs;
	unsigned long user_ssp;
	int r;

	if (!cpu_feature_enabled(X86_FEATURE_USER_SHSTK) ||
	    !ssp_active(target, regset))
		return -ENODEV;

	if (pos != 0 || count != sizeof(user_ssp))
		return -EINVAL;

	r = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_ssp, 0, -1);
	if (r)
		return r;

	/*
	 * Some kernel instructions (IRET, etc) can cause exceptions in the case
	 * of disallowed CET register values. Just prevent invalid values.
	 */
	if (user_ssp >= TASK_SIZE_MAX || !IS_ALIGNED(user_ssp, 8))
		return -EINVAL;

	fpu_force_restore(fpu);

	cetregs = get_xsave_addr(xsave, XFEATURE_CET_USER);
	if (WARN_ON(!cetregs)) {
		/*
		 * This shouldn't ever be NULL because shadow stack was
		 * verified to be enabled above. This means
		 * MSR_IA32_U_CET.CET_SHSTK_EN should be 1 and so
		 * XFEATURE_CET_USER should not be in the init state.
		 */
		return -ENODEV;
	}

	cetregs->user_ssp = user_ssp;
	return 0;
}
#endif /* CONFIG_X86_USER_SHADOW_STACK */
|---|

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
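
/*
 * Worked example for twd_i387_to_fxsr(): the i387 tag word uses two bits
 * per register (00 valid, 01 zero, 10 special, 11 empty), while the FXSR
 * abridged tag keeps one bit per register (1 = non-empty). So:
 *
 *   twd = 0xfffc (st0 valid, st1..st7 empty)  ->  0x01
 *   twd = 0x0000 (all registers valid)        ->  0xff
 */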
|---|

#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3

static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}
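
/*
 * Worked example for twd_fxsr_to_i387(), assuming tos == 0: an abridged tag
 * of 0x00 (all registers empty) expands to 0xffffffff, while 0x01 with a
 * normalized value in slot 0 (bit 15 of significand[3] set, exponent neither
 * 0 nor 0x7fff) yields 0xfffffffc, i.e. tag 00 (valid) for st0 and 11 (empty)
 * for the rest.
 */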
|---|

/*
 * FXSR floating point environment conversions.
 */
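
/*
 * Note on the copy loops below: the legacy i387 image packs the eight st/mm
 * registers at a 10-byte stride (struct _fpreg), while the FXSAVE image keeps
 * each register in a 16-byte slot (struct _fpxreg), so the registers are
 * copied one at a time rather than with a single memcpy().
 */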
|---|

static void __convert_from_fxsr(struct user_i387_ia32_struct *env,
				struct task_struct *tsk,
				struct fxregs_state *fxsave)
{
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * These should actually be the ds/cs at the time of the FPU
	 * exception, but that information is not available in 64-bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
|---|

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	__convert_from_fxsr(env, tsk, &x86_task_fpu(tsk)->fpstate->regs.fxsave);
}

void convert_to_fxsr(struct fxregs_state *fxsave,
		     const struct user_i387_ia32_struct *env)
{
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
|---|

int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       struct membuf to)
{
	struct fpu *fpu = x86_task_fpu(target);
	struct user_i387_ia32_struct env;
	struct fxregs_state fxsave, *fx;

	sync_fpstate(fpu);

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, to);

	if (!cpu_feature_enabled(X86_FEATURE_FXSR)) {
		return membuf_write(&to, &fpu->fpstate->regs.fsave,
				    sizeof(struct fregs_state));
	}

	if (use_xsave()) {
		struct membuf mb = { .p = &fxsave, .left = sizeof(fxsave) };

		/* Handle init state optimized xstate correctly */
		copy_xstate_to_uabi_buf(mb, target, XSTATE_COPY_FP);
		fx = &fxsave;
	} else {
		fx = &fpu->fpstate->regs.fxsave;
	}

	__convert_from_fxsr(&env, target, fx);
	return membuf_write(&to, &env, sizeof(env));
}
|---|

int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct fpu *fpu = x86_task_fpu(target);
	struct user_i387_ia32_struct env;
	int ret;

	/* No funny business with partial or oversized writes is permitted. */
	if (pos != 0 || count != sizeof(struct user_i387_ia32_struct))
		return -EINVAL;

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (ret)
		return ret;

	fpu_force_restore(fpu);

	if (cpu_feature_enabled(X86_FEATURE_FXSR))
		convert_to_fxsr(&fpu->fpstate->regs.fxsave, &env);
	else
		memcpy(&fpu->fpstate->regs.fsave, &env, sizeof(env));

	/*
	 * Update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_feature_enabled(X86_FEATURE_XSAVE))
		fpu->fpstate->regs.xsave.header.xfeatures |= XFEATURE_MASK_FP;

	return 0;
}
|---|

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */