/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
};

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);

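/*
 * Usage illustration (added commentary, not part of the original file).
 * A typical reader pins the online mask across a sleepable region;
 * do_work_on() below is a hypothetical helper, shown only for the pairing:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_work_on(cpu);
 *	put_online_cpus();
 *
 * Readers may nest; only a concurrent hotplug writer waits, and only
 * until the refcount drops back to zero.
 */
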
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

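/*
 * Caller sketch (added commentary, not part of the original file).  A
 * minimal hotplug notifier might look like the following; the names
 * example_cpu_callback, example_prepare and example_cleanup are invented
 * for this illustration:
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return example_prepare(cpu) ? NOTIFY_BAD : NOTIFY_OK;
 *		case CPU_DEAD:
 *			example_cleanup(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_notifier = {
 *		.notifier_call = example_cpu_callback,
 *	};
 *
 * registered once at init time via
 * register_cpu_notifier(&example_cpu_notifier).  Masking off
 * CPU_TASKS_FROZEN makes suspend-time transitions take the same paths
 * as runtime hotplug.
 */
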
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/* Find a thread that still has a valid mm, if any. */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);

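/*
 * Added commentary: the common trigger for cpu_down() is the sysfs
 * "online" attribute -- writing 0 to /sys/devices/system/cpu/cpuN/online
 * reaches this function via the cpu subsystem (drivers/base/cpu.c).
 */
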
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

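/*
 * Added commentary: cpu_up() mirrors cpu_down() above; writing 1 to the
 * same sysfs "online" attribute lands here, as do platform hot-add paths.
 */
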
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);
	arch_disable_nonboot_cpus_begin();

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable_before_freeze();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable_after_thaw();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

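/*
 * Added commentary: each architecture's secondary-CPU startup code is
 * responsible for this call; on x86, for instance, it is made from the
 * early bringup path while the new CPU still has interrupts disabled.
 */
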
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */
/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

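/*
 * Worked example (added commentary, not part of the original file): with
 * BITS_PER_LONG == 64, cpumask_of(67) needs a mask whose word 67 / 64 == 1
 * holds 1UL << (67 % 64) == 1UL << 3.  The get_cpu_mask() helper points at
 * row 1 + 67 % 64 == 4, whose word 0 is 1UL << 3, then backs the pointer
 * up by 67 / 64 == 1 word; the set bit thus appears in word 1 of the
 * result, while the word before it -- the all-zero tail of the row above --
 * reads as zero.  One shared table serves every possible CPU number.
 */
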
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
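
/*
 * Illustration (added commentary, not part of the original file): early
 * arch setup typically seeds these masks before SMP bringup, along the
 * lines of the following hypothetical sketch, where ncores is whatever
 * the platform discovered:
 *
 *	init_cpu_possible(cpumask_of(0));
 *	for (i = 1; i < ncores; i++)
 *		set_cpu_possible(i, true);
 *	init_cpu_present(cpu_possible_mask);
 *
 * The online mask is then populated one CPU at a time as each core
 * actually comes up.
 */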