/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

static inline void smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

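/*
 * Called once on the boot CPU to set up state for the secondaries and
 * hand off to the platform's prepare_cpus() hook.
 */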
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

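/*
 * Record the boot CPU in the physical<->logical maps and mark it
 * online and possible before any secondaries are started.
 */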
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
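/*
 * Wait for a dying CPU to mark itself CPU_DEAD, polling for up to
 * roughly one second (10 x 100ms) before giving up.
 */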
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

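/*
 * Common teardown run on the CPU going offline: drop the idle task's
 * mm, tear down the per-CPU IRQ context and advertise CPU_DEAD so that
 * native_cpu_die() on another CPU can observe it.
 */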
void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

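/*
 * Generic CPU-offline path: let the platform veto the offline first,
 * then mark the CPU offline, migrate its IRQs, stop its local timer
 * and flush its caches and TLB before it is torn down.
 */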
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

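/*
 * C entry point for secondary CPUs, reached from the low-level boot
 * code in head.S. Sets up the MMU context, traps and local timer, and
 * marks the CPU online before entering the idle loop.
 */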
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	/* Enable local timers */
	local_timer_setup(cpu);
	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_ONLINE);
}

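/*
 * Boot parameter block for secondary CPUs; the fields are filled in by
 * __cpu_up() below and consumed by the startup code in head.S.
 */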
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

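/*
 * Bring one secondary CPU into the kernel: hand it its stack and entry
 * point, kick it via the platform start_cpu() hook, then wait up to one
 * second (HZ jiffies) for it to mark itself online.
 */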
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

void smp_timer_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	local_timer_interrupt();
	irq_exit();
}

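/*
 * Demultiplex an incoming IPI; called by the platform IPI code with
 * the message type that was delivered.
 */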
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or PTE attributes are changing. For single-threaded
 * address spaces, a new context is obtained on the current CPU, and the
 * TLB context on other CPUs is invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * CPUs. For multithreaded address spaces, inter-CPU interrupts have to
 * be sent. Another case where inter-CPU interrupts are required is when
 * the target mm might be active on another CPU (e.g. debuggers doing the
 * flushes on behalf of debuggees, kswapd stealing pages from another
 * process, etc.).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

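/* Argument block passed to the TLB flush IPI handlers below. */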
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

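/*
 * Kernel mappings are shared by all CPUs, so the flush is always
 * broadcast to every online CPU rather than using the lazy
 * context-invalidation shortcut above.
 */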
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

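/*
 * Flush a single (ASID, vaddr) translation on every CPU: remote CPUs
 * are flushed via IPI and the local CPU directly.
 */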
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}

#endif