/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>
/*
 * High level FPU state handling functions:
 */
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__current_fpstate_write_begin(void);
extern void fpu__current_fpstate_write_end(void);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif
/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}
/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif

static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
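/*
 * Usage sketch (illustrative, not an additional API): both helpers wrap a
 * single FPU instruction and evaluate to 0 on success or -1 if the
 * instruction faulted (the fault is fixed up via the exception table).
 * user_insn() additionally brackets the instruction with STAC/CLAC so the
 * instruction may touch user memory under SMAP, e.g.:
 *
 *	int err = user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
 *	if (err)
 *		... the fxsave faulted on the user pointer ...
 */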
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}
static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}
static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	int err;

	if (config_enabled(CONFIG_X86_32)) {
		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	} else {
		if (config_enabled(CONFIG_AS_FXSAVEQ)) {
			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
		} else {
			/* See comment in copy_fxregs_to_kernel() below. */
			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
		}
	}
	/* Copying from a kernel buffer to FPU registers should never fail: */
	WARN_ON_FPU(err);
}
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (config_enabled(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			  "m" (*fx));
}
static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));

	WARN_ON_FPU(err);
}
static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (config_enabled(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (config_enabled(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
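/*
 * Encoding note (for readers, not used by the code): each sequence is the
 * XSAVE-family opcode followed by a ModRM byte with mod=00, r/m=111, i.e.
 * a (%edi)/(%rdi) memory operand, with the reg field selecting the
 * instruction: XSAVE is 0F AE /4 (0x27), XSAVEOPT 0F AE /6 (0x37),
 * XRSTOR 0F AE /5 (0x2f), XSAVES 0F C7 /5 (0x2f) and XRSTORS 0F C7 /3
 * (0x1f). Emitting raw bytes keeps the file assembling even with binutils
 * versions that may not know these mnemonics.
 */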
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to the modified optimization of
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports the modified optimization which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction at which we might get an exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compact XSAVE format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * This function is called only during boot time, when x86 caps are not set
 * up and alternatives can not be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * This function is called only during boot time, when x86 caps are not set
 * up and alternatives can not be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	XSTATE_XRESTORE(xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted xsave format, for backward compatibility with
 * old applications which don't understand it.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, -1, -1, err);
	clac();

	return err;
}
/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}
/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction clears all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving: FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}
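/*
 * Caller sketch (illustrative, simplified from fpu__save(); not the exact
 * kernel code): save the current task's registers and, in lazy mode, drop
 * register ownership when FNSAVE destroyed them:
 *
 *	preempt_disable();
 *	if (fpu->fpregs_active) {
 *		if (!copy_fpregs_to_fpstate(fpu))
 *			fpregs_deactivate(fpu);
 *	}
 *	preempt_enable();
 */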
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, -1);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}
static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The memory operand referenced by [addr] is arbitrary - it only has
	 * to name a variable that is likely to be in L1 cache.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate);
}
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
 * on this CPU.
 *
 * This disables any lazy FPU state restore of the current FPU state, but if
 * the current thread owns the FPU, its register contents will still be saved
 * back to memory on the next context switch.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}

static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
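/*
 * Illustration: on a context switch back onto this task, the register reload
 * can be skipped when
 *
 *	fpu_want_lazy_restore(new_fpu, cpu)
 *
 * is true, i.e. this CPU still owns new_fpu's registers
 * (fpu_fpregs_owner_ctx) and the task last ran here (fpu->last_cpu), so the
 * register contents are still the task's own.
 */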
/*
 * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
 * idiom, which is then paired with the sw-flag (fpregs_active) later on:
 */

static inline void __fpregs_activate_hw(void)
{
	if (!use_eager_fpu())
		clts();
}

static inline void __fpregs_deactivate_hw(void)
{
	if (!use_eager_fpu())
		stts();
}
/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
	WARN_ON_FPU(!fpu->fpregs_active);

	fpu->fpregs_active = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu->fpregs_active);

	fpu->fpregs_active = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}
/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
	return current->thread.fpu.fpregs_active;
}
/*
 * Encapsulate the CR0.TS handling together with the
 * sw-flag (fpregs_active) updates.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void fpregs_activate(struct fpu *fpu)
{
	__fpregs_activate_hw();
	__fpregs_activate(fpu);
}

static inline void fpregs_deactivate(struct fpu *fpu)
{
	__fpregs_deactivate(fpu);
	__fpregs_deactivate_hw();
}
/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 *
 * (See the usage sketch after switch_fpu_finish() below.)
 */
typedef struct { int preload; } fpu_switch_t;
static inline fpu_switch_t
switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used the FPU, pre-load its state: always on
	 * eager-FPU (xsave) processors, or if more than 5 consecutive
	 * context switches used the FPU.
	 */
	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
		      new_fpu->fpstate_active &&
		      (use_eager_fpu() || new_fpu->counter > 5);

	if (old_fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old_fpu->fpregs_active = 0;
		trace_x86_fpu_regs_deactivated(old_fpu);

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new_fpu->counter++;
			__fpregs_activate(new_fpu);
			trace_x86_fpu_regs_activated(new_fpu);
			prefetch(&new_fpu->state);
		} else {
			__fpregs_deactivate_hw();
		}
	} else {
		old_fpu->counter = 0;
		old_fpu->last_cpu = -1;
		if (fpu.preload) {
			new_fpu->counter++;
			if (fpu_want_lazy_restore(new_fpu, cpu))
				fpu.preload = 0;
			else
				prefetch(&new_fpu->state);
			fpregs_activate(new_fpu);
		}
	}
	return fpu;
}
/*
 * Misc helper functions:
 */

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload)
		copy_kernel_to_fpregs(&new_fpu->state);
}
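/*
 * Usage sketch (illustrative, modeled on how __switch_to() pairs the two
 * stages; the variable names are made up):
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(&prev->thread.fpu,
 *					&next->thread.fpu, cpu);
 *	... switch stacks, segments, etc. ...
 *	switch_fpu_finish(&next->thread.fpu, fpu_switch);
 */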
/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the saved state. It does not do any saving/restoring on its own. In
 * lazy FPU mode it is just an optimization to avoid a #NM exception:
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!fpregs_active())
		fpregs_activate(fpu);
	preempt_enable();
}
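/*
 * Usage sketch (illustrative, loosely based on the sigreturn path; 'buf' is
 * a placeholder for the user-space xsave buffer being restored): activate
 * the registers for the current task immediately before filling them from
 * a user buffer:
 *
 *	user_fpu_begin();
 *	err = copy_user_to_xregs(buf, xfeatures_mask);
 */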
/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}
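/*
 * Usage sketch (illustrative): XCR0 - the XFEATURE_ENABLED_MASK register -
 * is read and written through these helpers, e.g.:
 *
 *	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *	...
 *	xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0 | new_feature_bits);
 *
 * ('new_feature_bits' is a placeholder here; the real feature enabling is
 * done in fpu__init_cpu_xstate().)
 */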
#endif /* _ASM_X86_FPU_INTERNAL_H */