/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
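
/* Prefix all pr_*() output from this file with the module name ("traps: "). */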
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/smp.h>
#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif
#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif
#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/fpu-internal.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);
#endif
/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);
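
/*
 * If the faulting context had interrupts enabled, it is safe to run the
 * handler with interrupts enabled as well; these helpers re-enable and
 * re-disable IRQs based on the saved EFLAGS.IF, and the preempt_* variants
 * additionally keep preemption disabled across that window.
 */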
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_count_inc();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	preempt_count_dec();
}
static int __kprobes
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}
#endif
	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}
static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				 siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = regs->ip;
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = regs->ip;
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}
static void __kprobes
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		conditional_sti(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}

	exception_exit(prev_state);
}
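
/*
 * Each DO_ERROR* line below stamps out a complete do_<name>() entry point
 * that forwards to do_error_trap() with the matching vector number, signal
 * and message string.
 */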
#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

#define DO_ERROR_INFO(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}
DO_ERROR_INFO(X86_TRAP_DE,     SIGFPE,  "divide error",			divide_error)
DO_ERROR     (X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
DO_ERROR     (X86_TRAP_BR,     SIGSEGV, "bounds",			bounds)
DO_ERROR_INFO(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
DO_ERROR     (X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",	coprocessor_segment_overrun)
DO_ERROR     (X86_TRAP_TS,     SIGSEGV, "invalid TSS",			invalid_TSS)
DO_ERROR     (X86_TRAP_NP,     SIGBUS,  "segment not present",		segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR     (X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
#endif
DO_ERROR_INFO(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)
#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
		       X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
		preempt_conditional_sti(regs);
		do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
		preempt_conditional_cli(regs);
	}
	exception_exit(prev_state);
}
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	exception_enter();
	/* Return not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif
dotraplinkage void __kprobes
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	conditional_sti(regs);

#ifdef CONFIG_X86_32
	if (regs->flags & X86_VM_MASK) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}
#endif

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		goto exit;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
	exception_exit(prev_state);
}
/* May run on IST stack. */
dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif /* CONFIG_DYNAMIC_FTRACE */

	if (poke_int3_handler(regs))
		return;

	prev_state = exception_enter();
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
exit:
	exception_exit(prev_state);
}
#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->sp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/*
	 * Exception from kernel and interrupts are enabled. Move to
	 * kernel process stack.
	 */
	else if (eregs->flags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	enum ctx_state prev_state;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	prev_state = exception_enter();

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (regs->flags & X86_VM_MASK) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

exit:
	exception_exit(prev_state);
}
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	siginfo_t info;
	unsigned short err;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode_vm(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	save_init_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)regs->ip;
	if (trapnr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status.  0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit.  We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		cwd = get_fpu_cwd(task);
		swd = get_fpu_swd(task);

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register.  Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = get_fpu_mxcsr(task);
		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		info.si_code = FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		info.si_code = FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		info.si_code = FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		info.si_code = FPE_FLTRES;
	} else {
		/*
		 * If we're using IRQ 13, or supposedly even some trap
		 * X86_TRAP_MF implementations, it's possible
		 * we get a spurious trap, which is not an error.
		 */
		return;
	}
	force_sig_info(SIGFPE, &info, task);
}
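
/*
 * Thin wrappers that tag the math error with the right trap number:
 * X86_TRAP_MF for the x87 FPU, X86_TRAP_XF for SIMD.
 */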
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_MF);
	exception_exit(prev_state);
}
dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_XF);
	exception_exit(prev_state);
}
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
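
/*
 * Weak fallback stubs: the real thermal/threshold interrupt handlers in the
 * MCE code override these empty definitions at link time when built in.
 */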
asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
{
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (e.g. with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
	struct task_struct *tsk = current;

	if (!tsk_used_math(tsk)) {
		local_irq_enable();
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
		local_irq_disable();
	}

	__thread_fpu_begin(tsk);

	/*
	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
	 */
	if (unlikely(restore_fpu_checking(tsk))) {
		drop_init_fpu(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
		return;
	}

	tsk->thread.fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);
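
/*
 * #NM (device-not-available) fires when an FP/SIMD instruction runs with
 * CR0.TS set; under lazy FPU switching this is where the task's FPU state
 * is actually restored, or emulated if math emulation is configured in.
 */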
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		exception_exit(prev_state);
		return;
	}
#endif
	math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
	exception_exit(prev_state);
}
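
/*
 * 32-bit only: a fault on the iret returning to user space arrives here
 * via the kernel exit path and is reported to the task as SIGILL.
 */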
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
	exception_exit(prev_state);
}
#endif
/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}
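
/*
 * On 64-bit the early boot code handles #PF itself while the early page
 * tables are being set up, so the real page-fault gate is installed later,
 * here; 32-bit already installed it in early_trap_init() above.
 */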
void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}
void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif
	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);
	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif
	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);
	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}