sched: zap the migration init / cache-hot balancing code
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c550bba..40e40f9 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -44,6 +44,8 @@
 
 extern void calibrate_delay(void);
 
+int sparc64_multi_core __read_mostly;
+
 /* Please don't make this stuff initdata!!!  --DaveM */
 unsigned char boot_cpu_id;
 
@@ -51,6 +53,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+       { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
 
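The new cpu_core_map[] sits next to cpu_sibling_map[] as a per-CPU cpumask, initialized empty for every possible CPU. As a rough illustration only (this helper is hypothetical and not part of the patch), other code could walk such a mask with the cpumask API this file already uses:

	/* Hypothetical debug helper, not in this patch: list the CPUs that
	 * ended up in cpu_core_map[] for a given cpu.
	 */
	static void print_core_siblings(unsigned int cpu)
	{
		unsigned int i;

		printk(KERN_INFO "CPU%u shares a core with:", cpu);
		for_each_cpu_mask(i, cpu_core_map[cpu]) {
			if (i != cpu)
				printk(" %u", i);
		}
		printk("\n");
	}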
@@ -1159,32 +1163,6 @@ int setup_profiling_timer(unsigned int multiplier)
        return -EINVAL;
 }
 
-static void __init smp_tune_scheduling(void)
-{
-       unsigned int smallest = ~0U;
-       int i;
-
-       for (i = 0; i < NR_CPUS; i++) {
-               unsigned int val = cpu_data(i).ecache_size;
-
-               if (val && val < smallest)
-                       smallest = val;
-       }
-
-       /* Any value less than 256K is nonsense.  */
-       if (smallest < (256U * 1024U))
-               smallest = 256 * 1024;
-
-       max_cache_size = smallest;
-
-       if (smallest < 1U * 1024U * 1024U)
-               printk(KERN_INFO "Using max_cache_size of %uKB\n",
-                      smallest / 1024U);
-       else
-               printk(KERN_INFO "Using max_cache_size of %uMB\n",
-                      smallest / 1024U / 1024U);
-}
-
 /* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
@@ -1202,7 +1180,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        }
 
        cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
-       smp_tune_scheduling();
 }
 
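The deleted smp_tune_scheduling() existed only to feed max_cache_size, the per-arch hint that the boot-time migration-cost calibration in kernel/sched.c used when estimating how long a task stays cache-hot; with that calibration code removed by this commit, the sparc64 helper and its call site go away as well. To make the removed clamping concrete (example numbers, not from any real machine): if the probed CPUs report E-cache sizes of 4MB and 512KB, smallest becomes 512KB, which is above the 256KB floor, so max_cache_size would have been 524288 and the boot log would have read "Using max_cache_size of 512KB"; a hypothetical 128KB value would instead have been raised to the 256KB minimum.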
 void __devinit smp_prepare_boot_cpu(void)
@@ -1217,13 +1194,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
                unsigned int j;
 
                if (cpu_data(i).core_id == 0) {
-                       cpu_set(i, cpu_sibling_map[i]);
+                       cpu_set(i, cpu_core_map[i]);
                        continue;
                }
 
                for_each_possible_cpu(j) {
                        if (cpu_data(i).core_id ==
                            cpu_data(j).core_id)
+                               cpu_set(j, cpu_core_map[i]);
+               }
+       }
+
+       for_each_possible_cpu(i) {
+               unsigned int j;
+
+               if (cpu_data(i).proc_id == -1) {
+                       cpu_set(i, cpu_sibling_map[i]);
+                       continue;
+               }
+
+               for_each_possible_cpu(j) {
+                       if (cpu_data(i).proc_id ==
+                           cpu_data(j).proc_id)
                                cpu_set(j, cpu_sibling_map[i]);
                }
        }
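Because the last hunk shows only fragments, here is a sketch of how smp_fill_in_sib_core_maps() reads once the patch is applied, with the enclosing first loop reconstructed under the assumption that it mirrors the for_each_possible_cpu() pass the patch adds for the sibling map: the first pass groups CPUs sharing a core_id into cpu_core_map[], the second groups CPUs sharing a proc_id into cpu_sibling_map[], and a CPU without topology data (core_id 0 or proc_id -1) is left alone in its own mask.

	/* Approximate post-patch shape of the function; reconstructed from the
	 * hunk above, with the outer loop of the first pass assumed. */
	void __devinit smp_fill_in_sib_core_maps(void)
	{
		unsigned int i;

		for_each_possible_cpu(i) {
			unsigned int j;

			if (cpu_data(i).core_id == 0) {
				cpu_set(i, cpu_core_map[i]);	/* no core info: self only */
				continue;
			}

			for_each_possible_cpu(j) {
				if (cpu_data(i).core_id == cpu_data(j).core_id)
					cpu_set(j, cpu_core_map[i]);
			}
		}

		for_each_possible_cpu(i) {
			unsigned int j;

			if (cpu_data(i).proc_id == -1) {
				cpu_set(i, cpu_sibling_map[i]);	/* no proc info: self only */
				continue;
			}

			for_each_possible_cpu(j) {
				if (cpu_data(i).proc_id == cpu_data(j).proc_id)
					cpu_set(j, cpu_sibling_map[i]);
			}
		}
	}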