sparc64: Only allocate per-cpu areas for possible cpus.
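
In short: the per-cpu region is now sized by num_possible_cpus() instead of NR_CPUS and allocated page-aligned from bootmem rather than through the lmb allocator's power-of-two scheme (hence the include swap below); each cpu's offset is recorded individually instead of being computed as base + (cpu << shift). Along the way, the hvtramp descriptor used for LDOM cpu startup stops living in trap_block[]: ldom_startcpu_cpuid() hands it back to smp_boot_one_cpu() through a new out-parameter, and the caller frees it.
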
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f7642e5..567a6a4 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -20,7 +20,7 @@
 #include <linux/cache.h>
 #include <linux/jiffies.h>
 #include <linux/profile.h>
-#include <linux/lmb.h>
+#include <linux/bootmem.h>
 #include <linux/cpu.h>
 
 #include <asm/head.h>
@@ -278,7 +278,7 @@ static unsigned long kimage_addr_to_ra(void *p)
        return kern_base + (val - KERNBASE);
 }
 
-static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
 {
        extern unsigned long sparc64_ttable_tl0;
        extern unsigned long kern_locked_tte_data;
@@ -298,12 +298,12 @@ static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread
                       "hvtramp_descr.\n");
                return;
        }
+       *descrp = hdesc;
 
        hdesc->cpu = cpu;
        hdesc->num_mappings = num_kernel_image_mappings;
 
        tb = &trap_block[cpu];
-       tb->hdesc = hdesc;
 
        hdesc->fault_info_va = (unsigned long) &tb->fault_info;
        hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
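
Note the placement of *descrp above: it is published as soon as the allocation succeeds, before the descriptor is filled in, while the failure path returns without touching it, so the caller can rely on its NULL-initialized pointer on every exit. With the pointer handed back this way, trap_block[] no longer needs to carry the descriptor at all; the hdesc member itself is presumably dropped from struct trap_per_cpu in the part of this commit outside this file.
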
@@ -341,12 +341,12 @@ static struct thread_info *cpu_new_thread = NULL;
 
 static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 {
-       struct trap_per_cpu *tb = &trap_block[cpu];
        unsigned long entry =
                (unsigned long)(&sparc64_cpu_startup);
        unsigned long cookie =
                (unsigned long)(&cpu_new_thread);
        struct task_struct *p;
+       void *descr = NULL;
        int timeout, ret;
 
        p = fork_idle(cpu);
@@ -359,7 +359,8 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
                if (ldom_domaining_enabled)
                        ldom_startcpu_cpuid(cpu,
-                                           (unsigned long) cpu_new_thread);
+                                           (unsigned long) cpu_new_thread,
+                                           &descr);
                else
 #endif
                        prom_startcpu_cpuid(cpu, entry, cookie);
@@ -383,10 +384,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
        }
        cpu_new_thread = NULL;
 
-       if (tb->hdesc) {
-               kfree(tb->hdesc);
-               tb->hdesc = NULL;
-       }
+       kfree(descr);
 
        return ret;
 }
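
With the descriptor handed back through the out-parameter, the cleanup collapses to an unconditional kfree(): descr starts out NULL, is set only when the allocation in ldom_startcpu_cpuid() succeeds, and kfree(NULL) is a no-op. A minimal userspace sketch of the same allocate-in-callee, free-in-caller pattern (names hypothetical; malloc/free stand in for kzalloc/kfree):

        #include <stdlib.h>

        /* Callee: allocates and publishes the buffer via an out-parameter.
         * On failure it returns without touching *descrp. */
        static void start_cpu(void **descrp)
        {
                void *d = malloc(64);

                if (!d)
                        return;
                *descrp = d;            /* the caller now owns d */
                /* ... fill in d and hand it to the hypervisor ... */
        }

        /* Caller: one local pointer, one unconditional cleanup point. */
        int main(void)
        {
                void *descr = NULL;

                start_cpu(&descr);
                /* ... wait for the new cpu to call in ... */
                free(descr);            /* free(NULL), like kfree(NULL), is a no-op */
                return 0;
        }

The old code had to both test and clear tb->hdesc to keep the global consistent; the local pointer needs neither.
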
@@ -1373,36 +1371,25 @@ void smp_send_stop(void)
 {
 }
 
-unsigned long __per_cpu_base __read_mostly;
-unsigned long __per_cpu_shift __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_base);
-EXPORT_SYMBOL(__per_cpu_shift);
-
-void __init real_setup_per_cpu_areas(void)
+void __init setup_per_cpu_areas(void)
 {
-       unsigned long paddr, goal, size, i;
+       unsigned long size, i, nr_possible_cpus = num_possible_cpus();
        char *ptr;
 
        /* Copy section for each CPU (we discard the original) */
-       goal = PERCPU_ENOUGH_ROOM;
-
-       __per_cpu_shift = PAGE_SHIFT;
-       for (size = PAGE_SIZE; size < goal; size <<= 1UL)
-               __per_cpu_shift++;
-
-       paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
-       if (!paddr) {
-               prom_printf("Cannot allocate per-cpu memory.\n");
-               prom_halt();
-       }
-
-       ptr = __va(paddr);
-       __per_cpu_base = ptr - __per_cpu_start;
+       size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+       ptr = alloc_bootmem_pages(size * nr_possible_cpus);
 
-       for (i = 0; i < NR_CPUS; i++, ptr += size)
+       for_each_possible_cpu(i) {
+               __per_cpu_offset(i) = ptr - __per_cpu_start;
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+               ptr += size;
+       }
 
        /* Setup %g5 for the boot cpu.  */
        __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
+
+       of_fill_in_cpu_data();
+       if (tlb_type == hypervisor)
+               mdesc_fill_in_cpu_data(CPU_MASK_ALL_PTR);
 }
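
The sizing change is the heart of the patch. The old scheme rounded PERCPU_ENOUGH_ROOM up to the next power of two (so a cpu's area could be found as base + (cpu << shift)) and reserved that for all NR_CPUS slots; the new one allocates a merely page-aligned stride for only the possible cpus and records each offset individually, so __per_cpu_offset() must now name an assignable per-cpu slot (redefined elsewhere in this commit), and __per_cpu_base/__per_cpu_shift plus their exports can go. There is also no failure check: unlike lmb_alloc(), alloc_bootmem_pages() panics internally on failure, which makes the old prom_printf()/prom_halt() path redundant. A rough stand-alone model of the two sizings, with stand-in numbers (8K sparc64 pages, a hypothetical 72K per-cpu image, 4 possible cpus against NR_CPUS=64):

        #include <stdio.h>

        #define PAGE_SIZE       8192UL
        #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

        int main(void)
        {
                unsigned long room = 72 * 1024; /* stand-in for PERCPU_ENOUGH_ROOM */
                unsigned long nr_possible = 4, nr_slots = 64;   /* possible vs NR_CPUS */
                unsigned long stride, size;

                /* Old scheme: next power of two >= room, one copy per slot. */
                for (stride = PAGE_SIZE; stride < room; stride <<= 1)
                        ;

                /* New scheme: page-aligned stride, one copy per possible cpu. */
                size = PAGE_ALIGN(room);

                printf("old: %lu KiB (stride %lu)\n", stride * nr_slots >> 10, stride);
                printf("new: %lu KiB (stride %lu)\n", size * nr_possible >> 10, size);
                return 0;
        }

With these numbers the old layout reserves 8 MiB where the new one needs 288 KiB. The of_fill_in_cpu_data()/mdesc_fill_in_cpu_data() calls appended at the end look relocated from elsewhere in the boot path (their removal is not in this hunk), presumably because the cpu data they fill in lives in per-cpu storage, which must exist first.
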