/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

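/*
 * One mask_info list per topology nesting level: each node carries the
 * container id reported by STSI and the cpumask of all CPUs below it.
 * The lists are pre-allocated in alloc_masks() and refilled on updates.
 */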
struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and per_cpu(cpu_topology) updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);

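/*
 * Return the cpumask of the topology group (socket, book or drawer list)
 * that contains @cpu. Falls back to a mask containing only @cpu itself
 * when topology support is disabled or unavailable.
 */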
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	for (; info; info = info->next) {
		if (cpumask_test_cpu(cpu, &info->mask))
			return info->mask;
	}
	return mask;
}

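/*
 * Return the mask of all hardware threads that share a core with @cpu.
 * Thread ids of one core are consecutive, so rounding @cpu down to a
 * multiple of (smp_cpu_mtid + 1) yields the core's first thread.
 */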
static cpumask_t cpu_thread_map(unsigned int cpu)
{
	cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	return mask;
}

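/*
 * Add all CPUs of one topology core entry to the given drawer/book/socket
 * masks and record the container ids in the per-cpu topology data. The
 * core mask of the TLE numbers cores from the leftmost bit, hence the bit
 * number is mirrored and offset by the entry's origin to get the real
 * core id.
 */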
static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
					  struct mask_info *drawer,
					  struct mask_info *book,
					  struct mask_info *socket,
					  int one_socket_per_cpu)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &per_cpu(cpu_topology, lcpu + i);
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			if (one_socket_per_cpu)
				topo->socket_id = rcore;
			else
				topo->socket_id = socket->id;
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
		if (one_socket_per_cpu)
			socket = socket->next;
	}
	return socket;
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &drawer_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

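/*
 * Walk the topology list returned by STSI 15.1.x and rebuild the mask
 * lists. Nesting level (nl) 3 opens a new drawer, 2 a new book, 1 a new
 * socket; level 0 entries describe the cores inside the current socket.
 */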
static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

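/*
 * z10 reports only two nesting levels and has one socket per core:
 * level 1 entries are books, level 0 entries are cores, and each core
 * consumes its own node from the pre-allocated socket list.
 */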
static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			socket = add_cpus_to_mask(&tle->cpu, drawer, book, socket, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_masks_z10(info);
		break;
	default:
		__tl_to_masks_generic(info);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}

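/*
 * Issue the PTF (perform topology function) instruction with function
 * code @fc and return its condition code. PTF_CHECK polls for a pending
 * topology change report; PTF_HORIZONTAL/PTF_VERTICAL request a
 * polarization switch.
 */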
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc)
		: "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo;
	int cpu;

	for_each_possible_cpu(cpu) {
		topo = &per_cpu(cpu_topology, cpu);
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
		if (!MACHINE_HAS_TOPOLOGY) {
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = cpu;
			topo->book_id = cpu;
			topo->drawer_id = cpu;
		}
	}
	numa_update_cpu_topology();
}

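/*
 * Fetch the machine topology via STSI 15.1.x. The nesting level is
 * clamped to 4, which is the deepest hierarchy this code handles
 * (drawer/book/socket containers plus the core entries).
 */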
void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, min(topology_max_mnest, 4));
}

int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu, rc = 0;

	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

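/*
 * Re-arm the deferrable topology timer: poll every 100ms while a change
 * is expected (topology_poll counts down), otherwise once per minute.
 */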
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int cpu_management;

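/*
 * The "dispatching" attribute lives in /sys/devices/system/cpu: writing
 * 0 requests horizontal CPU dispatching, 1 requests vertical. A sketch
 * of the intended use from a shell (assuming topology support, the
 * write fails with -EBUSY if the hypervisor refuses the switch):
 *
 *	echo 1 > /sys/devices/system/cpu/dispatching
 */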
static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

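/*
 * Per-CPU "polarization" attribute: reports horizontal or the vertical
 * entitlement level (high/medium/low) of a CPU. Hypothetical example,
 * assuming vertical dispatching is active:
 *
 *	cat /sys/devices/system/cpu/cpu0/polarization
 *	vertical:high
 */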
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).drawer_mask;
}

static int __init early_parse_topology(char *p)
{
	return kstrtobool(p, &topology_enabled);
}
early_param("topology", early_parse_topology);

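/*
 * Scheduling domain hierarchy handed to the scheduler, from the smallest
 * grouping (SMT threads of a core) up to all CPUs of the machine (DIE).
 */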
static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

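/*
 * Pre-allocate one mask_info node per possible container at this nesting
 * level. The count is the product of the STSI magnitude values above
 * @offset, i.e. the maximum number of containers the machine can report.
 */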
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
		mask = mask->next;
	}
}

static int __init s390_topology_init(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return 0;
	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
	set_sched_topology(s390_topology);
	return 0;
}
early_initcall(s390_topology_init);

static int __init topology_init(void)
{
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);