/*
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void idle_notifier_call_chain(unsigned long val)
{
	atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
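/*
 * Minimal usage sketch (not part of this file): a driver interested in
 * idle transitions registers a callback on this chain. The 'val' events
 * are whatever the platform's idle loop passes to
 * idle_notifier_call_chain(); no event constants are defined here, so the
 * callback below is purely illustrative:
 *
 *	static int my_idle_cb(struct notifier_block *nb,
 *			      unsigned long val, void *unused)
 *	{
 *		return NOTIFY_OK;	// react to platform-defined 'val'
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *	...
 *	idle_notifier_register(&my_idle_nb);
 *
 * The chain is atomic, so callbacks run in atomic context and must not
 * sleep.
 */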
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
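/*
 * Reader-side usage sketch (illustrative; do_something() is a hypothetical
 * helper): code that walks the online CPUs and must not race with a CPU
 * going away brackets the traversal with this refcounting pair:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 *
 * Readers hold cpu_hotplug.lock only long enough to bump the refcount, so
 * they can nest and run concurrently; a writer in cpu_hotplug_begin()
 * keeps the mutex until the refcount drains to zero.
 */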
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}
static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}
static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
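/*
 * Hedged sketch of a typical consumer (not part of this file): a subsystem
 * with per-cpu state usually registers a callback for the CPU_* events
 * delivered through cpu_chain. my_cpu_callback and the setup/teardown
 * helpers are hypothetical names:
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(setup_cpu_state(cpu));
 *		case CPU_DEAD:
 *			teardown_cpu_state(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_cpu_nb = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *	...
 *	register_cpu_notifier(&my_cpu_nb);
 *
 * Returning an error from a *_PREPARE event makes the core unwind via
 * CPU_UP_CANCELED/CPU_DOWN_FAILED, as _cpu_up()/_cpu_down() below show.
 */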
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu; only the idle task is left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}
int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
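/*
 * For reference: the usual caller of cpu_up()/cpu_down() is the per-cpu
 * "online" sysfs attribute (drivers/base/cpu.c), so from userspace:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	# -> cpu_down(1)
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	# -> cpu_up(1)
 *
 * Both paths funnel through cpu_maps_update_begin(), so they serialize
 * against each other and against disable_nonboot_cpus() during suspend.
 */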
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);
	arch_disable_nonboot_cpus_begin();

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}
/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable_before_freeze();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable_after_thaw();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
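/*
 * Illustrative flow (sketch, assuming a suspend initiated via
 * /sys/power/state):
 *
 *	echo mem > /sys/power/state
 *	  -> pm_notifier_call_chain(PM_SUSPEND_PREPARE)
 *	       -> cpu_hotplug_pm_callback()
 *	            -> cpu_hotplug_disable_before_freeze()
 *	  -> freeze tasks, disable_nonboot_cpus(), suspend ...
 *	  -> pm_notifier_call_chain(PM_POST_SUSPEND)
 *	       -> cpu_hotplug_enable_after_thaw()
 *
 * Any cpu_up()/cpu_down() issued in between sees cpu_hotplug_disabled set
 * and fails with -EBUSY instead of racing with the freezer.
 */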
#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
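/*
 * How the "back into it" trick works (sketch of the lookup performed by
 * cpumask_of() via get_cpu_mask() in <linux/cpumask.h>): for a given cpu,
 * the mask pointer is computed as
 *
 *	p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG] - cpu / BITS_PER_LONG;
 *
 * Worked example, assuming BITS_PER_LONG == 64 and cpu == 70: the row is
 * 1 + 70 % 64 = 7, whose word 0 holds 1UL << 6; backing up 70 / 64 = 1
 * word starts the mask in the all-zero tail of row 6, so bit 70 of the
 * resulting mask is set and every other bit reads as zero. The all-zero
 * row 0 exists so that masks for the first word's CPUs can back up safely.
 */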
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
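/*
 * Illustrative callers (a sketch only; the details vary per architecture):
 * arch SMP boot code typically seeds these masks during early bring-up,
 * e.g.
 *
 *	init_cpu_possible(cpumask_of(0));	// start with the boot CPU
 *	set_cpu_possible(1, true);		// then add secondaries found
 *	set_cpu_present(1, true);		// in the firmware tables
 *
 * and each secondary marks itself online from its startup path, roughly
 * set_cpu_online(smp_processor_id(), true), once it is ready to schedule.
 */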