#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
        atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
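
/*
 * Illustrative sketch, not part of the original file: a caller that wants
 * to hear about idle transitions registers a notifier_block whose callback
 * receives IDLE_START/IDLE_END. The names "example_idle_event" and
 * "example_idle_nb" are invented for this example.
 *
 *      static int example_idle_event(struct notifier_block *nb,
 *                                    unsigned long action, void *unused)
 *      {
 *              if (action == IDLE_START)
 *                      pr_debug("CPU%d entering idle\n", smp_processor_id());
 *              else if (action == IDLE_END)
 *                      pr_debug("CPU%d leaving idle\n", smp_processor_id());
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block example_idle_nb = {
 *              .notifier_call = example_idle_event,
 *      };
 *
 *      idle_notifier_register(&example_idle_nb);
 */
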
struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        int ret;

        *dst = *src;
        if (fpu_allocated(&src->thread.fpu)) {
                memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
                ret = fpu_alloc(&dst->thread.fpu);
                if (ret)
                        return ret;

                fpu_copy(&dst->thread.fpu, &src->thread.fpu);
        }
        return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
        fpu_free(&tsk->thread.fpu);
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, THREAD_ORDER);
}

void arch_task_cache_init(void)
{
        task_xstate_cachep =
                kmem_cache_create("task_xstate", xstate_size,
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;

        if (bp) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }
}

void show_regs(struct pt_regs *regs)
{
        show_registers(regs);
        show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
}

void show_regs_common(void)
{
        const char *vendor, *product, *board;

        vendor = dmi_get_system_info(DMI_SYS_VENDOR);
        if (!vendor)
                vendor = "";
        product = dmi_get_system_info(DMI_PRODUCT_NAME);
        if (!product)
                product = "";

        /* Board Name is optional */
        board = dmi_get_system_info(DMI_BOARD_NAME);

        printk(KERN_CONT "\n");
        printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        printk(KERN_CONT " %s %s", vendor, product);
        if (board)
                printk(KERN_CONT "/%s", board);
        printk(KERN_CONT "\n");
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}

static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}
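
/*
 * Illustrative sketch, not part of the original file: get_tsc_mode() and
 * set_tsc_mode() sit behind the PR_GET_TSC/PR_SET_TSC prctl() calls, so a
 * user-space process could fence off RDTSC for itself roughly like this
 * (error handling omitted):
 *
 *      #include <sys/prctl.h>
 *
 *      int mode;
 *      prctl(PR_GET_TSC, &mode);              reads back PR_TSC_ENABLE here
 *      prctl(PR_SET_TSC, PR_TSC_SIGSEGV);     RDTSC now raises SIGSEGV
 */
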

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
            test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
                        debugctl |= DEBUGCTLMSR_BTF;

                update_debugctlmsr(debugctl);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}

int sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
                       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->sp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the argument.
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.si = (unsigned long) fn;
        regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.gs = __KERNEL_STACK_CANARY;
#else
        regs.ss = __KERNEL_DS;
#endif

        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
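
/*
 * Illustrative sketch, not part of the original file: kernel_thread() takes
 * a function, an opaque argument and clone flags, and returns the new pid
 * (or a negative errno). "example_thread_fn" is an invented name; most code
 * uses the higher-level kthread_create()/kthread_run() interface rather than
 * this primitive directly.
 *
 *      static int example_thread_fn(void *data)
 *      {
 *              pr_info("kernel thread running, arg=%p\n", data);
 *              return 0;
 *      }
 *
 *      pid_t pid = kernel_thread(example_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 */
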

/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
                const char __user *const __user *argv,
                const char __user *const __user *envp, struct pt_regs *regs)
{
        long error;
        char *filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
#endif

        putname(filename);
        return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif

static inline int hlt_use_halt(void)
{
        return 1;
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
        percpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
        /* idle loop has pid 0 */
        if (current->pid)
                return;
        __exit_idle();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. CPU0 already has it initialized but no harm in
         * doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();
        current_thread_info()->status |= TS_POLLING;

        while (1) {
                tick_nohz_idle_enter();

                while (!need_resched()) {
                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();

                        /*
                         * Idle routines should keep interrupts disabled
                         * from here on, until they go to idle.
                         * Otherwise, idle callbacks can misfire.
                         */
                        local_touch_nmi();
                        local_irq_disable();
                        enter_idle();

                        /* Don't trace irqs off for idle */
                        stop_critical_timings();

                        /* enter_idle() needs rcu for notifiers */
                        rcu_idle_enter();
                        if (cpuidle_idle_call())
                                pm_idle();
                        rcu_idle_exit();
                        start_critical_timings();

                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */
                        __exit_idle();
                }

                tick_nohz_idle_exit();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (hlt_use_halt()) {
                trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();

                if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                trace_power_end_rcuidle(smp_processor_id());
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
                /* loop is done by the caller */
                cpu_relax();
        }
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

bool set_pm_idle_to_default(void)
{
        bool ret = !!pm_idle;

        pm_idle = default_idle;

        return ret;
}

void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();

        for (;;) {
                if (hlt_works(smp_processor_id()))
                        halt();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
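
/*
 * Illustrative sketch, not part of the original file, of the calling
 * convention documented above: install the new handler first, then wait.
 * "example_idle" stands for an invented idle routine.
 *
 *      pm_idle = example_idle;
 *      cpu_idle_wait();        (no CPU still runs the old handler after this returns)
 */
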

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
        if (!need_resched()) {
                trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_power_end_rcuidle(smp_processor_id());
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else
                local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_power_end_rcuidle(smp_processor_id());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */

#define MWAIT_INFO                      0x05
#define MWAIT_ECX_EXTENDED_INFO         0x01
#define MWAIT_EDX_C1                    0xf0

int mwait_usable(const struct cpuinfo_x86 *c)
{
        u32 eax, ebx, ecx, edx;

        if (boot_option_idle_override == IDLE_FORCE_MWAIT)
                return 1;

        if (c->cpuid_level < MWAIT_INFO)
                return 0;

        cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
        /* Check whether EDX has extended info about MWAIT */
        if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
                return 1;

        /*
         * EDX enumerates MONITOR/MWAIT extensions. Check whether
         * C1 supports MWAIT.
         */
        return (edx & MWAIT_EDX_C1);
}
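
/*
 * Added note, not in the original file: the check above reads CPUID leaf
 * 0x05 (MONITOR/MWAIT). ECX bit 0 (MWAIT_ECX_EXTENDED_INFO) says whether
 * EDX is valid; EDX[7:4] (the MWAIT_EDX_C1 mask) enumerates the C1
 * sub-states reachable via MWAIT, so a nonzero field means MWAIT can be
 * used for C1 and mwait_usable() reports it as usable.
 */
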
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
        if (amd_e400_c1e_mask != NULL)
                cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
        if (need_resched())
                return;

        if (!amd_e400_c1e_detected) {
                u32 lo, hi;

                rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                        amd_e400_c1e_detected = true;
                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                mark_tsc_unstable("TSC halt in AMD C1E");
                        printk(KERN_INFO "System has AMD C1E enabled\n");
                }
        }

        if (amd_e400_c1e_detected) {
                int cpu = smp_processor_id();

                if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
                        cpumask_set_cpu(cpu, amd_e400_c1e_mask);
                        /*
                         * Force broadcast so ACPI can not interfere.
                         */
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
                                           &cpu);
                        printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
                               cpu);
                }
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

                default_idle();

                /*
                 * The switch back from broadcast mode needs to be
                 * called with interrupts disabled.
                 */
                local_irq_disable();
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
                local_irq_enable();
        } else
                default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
#endif
        if (pm_idle)
                return;

        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
                 * One CPU supports mwait => all CPUs support mwait
                 */
                printk(KERN_INFO "using mwait in idle threads.\n");
                pm_idle = mwait_idle;
        } else if (cpu_has_amd_erratum(amd_erratum_400)) {
                /* E400: APIC timer interrupt does not wake up CPU from C1e */
                printk(KERN_INFO "using AMD E400 aware idle routine\n");
                pm_idle = amd_e400_idle;
        } else
                pm_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
        /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
        if (pm_idle == amd_e400_idle)
                zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
                boot_option_idle_override = IDLE_POLL;
        } else if (!strcmp(str, "mwait")) {
                boot_option_idle_override = IDLE_FORCE_MWAIT;
                WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
        } else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option of idle=halt is added, halt is
                 * forced to be used for CPU idle. In such case CPU C2/C3
                 * won't be used again.
                 * To continue to load the CPU idle driver, don't touch
                 * the boot_option_idle_override.
                 */
                pm_idle = default_idle;
                boot_option_idle_override = IDLE_HALT;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * If the boot option of "idle=nomwait" is added,
                 * it means that mwait will be disabled for CPU C2/C3
                 * states. In such case it won't touch the variable
                 * of boot_option_idle_override.
                 */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else
                return -1;

        return 0;
}
early_param("idle", idle_setup);
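
/*
 * Example boot command-line values handled by idle_setup() above
 * (illustrative summary, not in the original file):
 *
 *      idle=poll       busy-wait in poll_idle(); lowest wakeup latency, highest power
 *      idle=halt       always idle with HLT via default_idle()
 *      idle=nomwait    keep the default selection but never use MWAIT for C-states
 *      idle=mwait      force MWAIT (deprecated, see the WARN_ONCE above)
 */
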

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}