/*
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>

#define CPU_BITS	64
#define NR_MAG		6

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

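/*
 * Layout of the topology information block returned by
 * stsi(info, 15, 1, 2): a header (struct tl_info) followed by a list
 * of variable-type entries (union tl_entry), each either a container
 * entry or a CPU entry.
 */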
struct tl_cpu {
	unsigned char reserved0[4];
	unsigned char :6;
	unsigned char pp:2;
	unsigned char reserved1;
	unsigned short origin;
	unsigned long mask[CPU_BITS / BITS_PER_LONG];
};

struct tl_container {
	unsigned char reserved[8];
};

union tl_entry {
	unsigned char nl;
	struct tl_cpu cpu;
	struct tl_container container;
};

struct tl_info {
	unsigned char reserved0[2];
	unsigned short length;
	unsigned char mag[NR_MAG];
	unsigned char reserved1;
	unsigned char mnest;
	unsigned char reserved2[4];
	union tl_entry tle[0];
};

struct core_info {
	struct core_info *next;
	cpumask_t mask;
};

static int topology_enabled;
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

cpumask_t cpu_core_map[NR_CPUS];

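/*
 * Return the set of CPUs that share a core with the given CPU. When
 * topology support is disabled or unavailable, fall back to
 * cpu_possible_map so all CPUs end up in one flat group.
 */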
static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
	struct core_info *core = &core_info;
	unsigned long flags;
	cpumask_t mask;

	cpus_clear(mask);
	if (!topology_enabled || !machine_has_topology)
		return cpu_possible_map;
	spin_lock_irqsave(&topology_lock, flags);
	while (core) {
		if (cpu_isset(cpu, core->mask)) {
			mask = core->mask;
			break;
		}
		core = core->next;
	}
	spin_unlock_irqrestore(&topology_lock, flags);
	if (cpus_empty(mask))
		mask = cpumask_of_cpu(cpu);
	return mask;
}

const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
{
	return &cpu_core_map[cpu];
}

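/*
 * Map the CPUs named in a topology-list CPU entry to logical CPU
 * numbers. Bits in tl_cpu->mask are numbered from the leftmost
 * position while find_first_bit()/find_next_bit() count from the
 * least-significant bit, hence the CPU_BITS - 1 - cpu reversal;
 * tl_cpu->origin is the CPU address corresponding to the first bit.
 */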
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
	     cpu < CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1)) {
		unsigned int rcpu, lcpu;

		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (__cpu_logical_map[lcpu] == rcpu) {
				cpu_set(lcpu, core->mask);
				smp_cpu_polarization[lcpu] = tl_cpu->pp;
			}
		}
	}
}

static void clear_cores(void)
{
	struct core_info *core = &core_info;

	while (core) {
		cpus_clear(core->mask);
		core = core->next;
	}
}

static union tl_entry *next_tle(union tl_entry *tle)
{
	if (tle->nl)
		return (union tl_entry *)((struct tl_container *)tle + 1);
	else
		return (union tl_entry *)((struct tl_cpu *)tle + 1);
}

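/*
 * Walk the topology list and rebuild the core list: a level-1
 * container entry advances to the next core, a CPU entry (nl == 0)
 * adds its CPUs to the current core, higher nesting levels are
 * skipped, and an unexpected entry disables topology support.
 */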
static void tl_to_cores(struct tl_info *info)
{
	union tl_entry *tle, *end;
	struct core_info *core = &core_info;

	spin_lock_irq(&topology_lock);
	clear_cores();
	tle = info->tle;
	end = (union tl_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 5: case 4: case 3: case 2:
			break;
		case 1:
			core = core->next;
			break;
		case 0:
			add_cpus_to_core(&tle->cpu, core);
			break;
		default:
			clear_cores();
			machine_has_topology = 0;
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}

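/*
 * When the topology facility is not available, report every CPU as
 * horizontally polarized.
 */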
static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}

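/*
 * Issue the PTF (perform topology function) instruction with the
 * given function code and return the resulting condition code,
 * extracted via ipm/srl.
 */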
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!machine_has_topology)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

static void update_cpu_core_map(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}

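/*
 * Re-read the topology information from the machine, update the core
 * map, and send a KOBJ_CHANGE uevent for each online CPU so userspace
 * can notice the change.
 */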
int arch_update_cpu_topology(void)
{
	struct tl_info *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!machine_has_topology) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	stsi(info, 15, 1, 2);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

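/*
 * Topology changes are polled: every 60 seconds the timer issues
 * PTF_CHECK and, if the machine reports a pending topology change,
 * schedules a scheduler-domain rebuild.
 */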
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

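/*
 * Topology support is off by default and is enabled with the
 * "topology=on" kernel parameter.
 */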
static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "on", 2))
		return 0;
	topology_enabled = 1;
	return 0;
}
early_param("topology", early_parse_topology);

static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!machine_has_topology) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);

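/*
 * Early boot setup: check via stfle() that the required facility bits
 * for configuration topology are present, read SYSIB 15.1.2 into a
 * bootmem page, and allocate one core_info node per core reported in
 * the mag[] fields.
 */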
void __init s390_init_cpu_topology(void)
{
	unsigned long long facility_bits;
	struct tl_info *info;
	struct core_info *core;
	int nr_cores;
	int i;

	if (stfle(&facility_bits, 1) <= 0)
		return;
	if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
		return;
	machine_has_topology = 1;

	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	stsi(info, 15, 1, 2);

	nr_cores = info->mag[NR_MAG - 2];
	for (i = 0; i < info->mnest - 2; i++)
		nr_cores *= info->mag[NR_MAG - 3 - i];

	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);

	core = &core_info;
	for (i = 0; i < nr_cores; i++) {
		core->next = alloc_bootmem(sizeof(struct core_info));
		core = core->next;
		if (!core)
			goto error;
	}
	return;
error:
	machine_has_topology = 0;
}