/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

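/*
 * Platform code installs its SMP method table here during early boot;
 * everything below (starting secondaries, IPI delivery, hotplug)
 * dispatches through these ops. Typical (illustrative) usage from a
 * platform's SMP glue:
 *
 *	register_smp_ops(&shx3_smp_ops);
 */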
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

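/*
 * Copy the boot CPU's cpuinfo into this CPU's slot; only the freshly
 * calibrated loops_per_jiffy value differs per-CPU.
 */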
static inline void smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

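/*
 * Mark the boot CPU online and wire up its physical<->logical mapping;
 * it is running by definition before any secondaries are started.
 */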
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

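/*
 * CPU hotplug: "native" helpers that platform SMP ops can point their
 * cpu_die/cpu_disable hooks at.
 */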
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

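/*
 * Take this CPU out of service: the platform gets a veto via
 * mp_ops->cpu_disable() before we clear the CPU from the online mask
 * and detach it from timers, IRQs and user address spaces.
 */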
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

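/*
 * Entry point for secondary CPUs, reached from the head.S startup
 * code: adopt init_mm, flush stale TLB state, bring up the local
 * timer, then enter the generic idle loop.
 */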
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_ONLINE);
}

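/*
 * Boot parameter block shared with the head.S startup code; __cpu_up()
 * fills it in before kicking a secondary CPU.
 */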
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

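/*
 * Bring one secondary online: hand it a stack and entry point, kick it
 * through the platform ops, then wait up to a second for it to mark
 * itself online.
 */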
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

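/*
 * Back-ends for the generic cross-call machinery; these map
 * smp_call_function() requests onto platform IPI messages.
 */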
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

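/*
 * Demultiplex an incoming IPI to the matching generic handler.
 */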
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

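/*
 * Argument block for the flush IPI handlers below; addr1/addr2 are
 * overloaded by the callers: start/end for range flushes, the target
 * page for flush_tlb_page(), and asid/vaddr for flush_tlb_one().
 */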
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}

#endif /* CONFIG_MMU */