/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 *  We work with logical cpu numbering everywhere we can. The only
 *  functions using the real cpu address (obtained via STAP) are the sigp
 *  functions. For all other functions we use the identity mapping.
 *  That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 *  used e.g. to find the idle task belonging to a logical cpu. Every array
 *  in the kernel is sorted by the logical cpu number and not by the physical
 *  one, which causes all the confusion with __cpu_logical_map and
 *  cpu_number_map in other architectures.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cpu.h>
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;
enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_MUTEX(smp_cpu_state_mutex);
#endif
static int smp_cpu_state[NR_CPUS];

static DEFINE_PER_CPU(struct cpu, cpu_devices);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

static void smp_ext_bitcall(int, ec_bit_sig);
/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock(&call_lock);
	call_data = &data;
	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
	spin_unlock(&call_lock);
out:
	if (local) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}
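/*
 * Note on the handshake above: each target cpu copies func, info and wait
 * out of call_data before raising its bit in data.started, so once all
 * started bits are set the on-stack call_data is no longer touched (except
 * for data.finished, which is only written when the caller also waits for
 * it). That is what makes the stack-allocated call_data safe to reuse.
 */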
/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
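/*
 * Minimal usage sketch (drain_local_buffer is a hypothetical callback):
 * run a fast, non-blocking helper on every other online cpu and wait for
 * completion:
 *
 *	static void drain_local_buffer(void *unused) { ... }
 *
 *	smp_call_function(drain_local_buffer, NULL, 0, 1);
 */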
/*
 * smp_call_function_single:
 * @cpu: the CPU where func should run
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on that CPU
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	preempt_disable();
	__smp_call_function_map(func, info, nonatomic, wait,
				cpumask_of_cpu(cpu));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
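/*
 * Usage sketch (same hypothetical callback as above): poke one specific
 * cpu without waiting for the callback to finish:
 *
 *	smp_call_function_single(cpu, drain_local_buffer, NULL, 0, 0);
 */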
void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus should be handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}
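/*
 * Each cpu rewrites its control registers as new = (old & andvals) | orvals,
 * so a caller sets a bit by putting it into orvals (with andvals all ones)
 * and clears a bit by masking it out of andvals (with orvals all zeroes),
 * exactly as the two helpers below do.
 */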
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1UL << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1UL << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
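/*
 * Example from this file: smp_prepare_cpus() below uses ctl_set_bit(14, 29)
 * to enable the extended save area on the boot cpu only; calling
 * smp_ctl_set_bit(14, 29) instead would broadcast the same control register
 * update to every online cpu.
 */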
/*
 * In early ipl state a temporary logical cpu number is needed, so that the
 * sigp functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
 */
#define CPU_INIT_NO	1
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost, when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
	       sigp_busy)
		udelay(10);
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
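/*
 * The save areas collected above are consumed outside this file; the zcore
 * dump driver (drivers/s390/char/zcore.c) is the expected reader of
 * zfcpdump_save_areas, which is why the array is exported.
 */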
static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}
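/*
 * Bit 0x40 in the status word stored by sigp sense indicates the stopped
 * state; cpu_stopped() deliberately ignores the remaining status bits.
 */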
static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}
static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
	return 0;
}
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
out:
	kfree(info);
	return rc;
}
static int smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpus_xor(avail, cpu_possible_map, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}
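/*
 * Rescanning looks only at cpus that are possible but not yet present
 * (the xor of the two maps), so already-known cpus keep their logical
 * numbers. The sclp variant asks the service element for the cpu list;
 * the sigp variant falls back to brute-force sensing of all 65536
 * possible cpu addresses.
 */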
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= 65535; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			__cpu_logical_map[CPU_INIT_NO] = cpu;
			if (!cpu_stopped(CPU_INIT_NO))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
		if (!cpu_stopped(CPU_INIT_NO)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
	lock_cpu_hotplug();
	smp_rescan_cpus();
	unlock_cpu_hotplug();
}
/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
	spin_lock_init(&(&per_cpu(s390_idle, cpu))->lock);
}
static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	if (!async_stack)
		goto out_async_stack;
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack)
		goto out_panic_stack;

	*lowcore = S390_lowcore;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out_save_area;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

#ifndef CONFIG_64BIT
out_save_area:
	free_page(panic_stack);
#endif
out_panic_stack:
	free_pages(async_stack, ASYNC_ORDER);
out_async_stack:
	free_pages((unsigned long) lowcore, lc_order);
	return -ENOMEM;
}
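/*
 * The async and panic stack entries in the lowcore point just past the top
 * of their areas (base + size) because the stacks grow downward; the same
 * convention is undone in smp_free_lowcore() below by subtracting the sizes
 * again before freeing.
 */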
#ifdef CONFIG_HOTPLUG_CPU
static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	lowcore_ptr[cpu] = NULL;
}
#endif /* CONFIG_HOTPLUG_CPU */
/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	__asm__ __volatile__("stam  0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	cpu_lowcore->softirq_pending = 0;
	cpu_lowcore->ext_call_fast = 0;

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
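/*
 * Bring-up protocol, as implemented above: sigp set_prefix points the new
 * cpu's lowcore at the freshly allocated copy, an initial kernel stack
 * frame is crafted by hand (save_area[15] becomes the stack pointer on
 * restart), and sigp restart finally kicks the cpu, which announces itself
 * by setting its bit in cpu_online_map from start_secondary().
 */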
static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	cpu_possible_map = cpumask_of_cpu(0);
	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
		cpu_set(cpu, cpu_possible_map);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
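/*
 * Example: booting with "possible_cpus=4" on the kernel command line limits
 * cpu_possible_map to cpus 0-3, even when NR_CPUS is larger.
 */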
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}
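/*
 * smp_ctl_bit_callback() is called directly here instead of going through
 * on_each_cpu(): only the dying cpu's own control registers need to change,
 * and this cpu has already been removed from cpu_online_map.
 */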
void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	smp_free_lowcore(cpu);
	printk(KERN_INFO "Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	print_cpu_info(&S390_lowcore.cpu_data);
	smp_alloc_lowcore(smp_processor_id());

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
				   size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&smp_cpu_state_mutex);
	lock_cpu_hotplug();
	rc = -EBUSY;
	if (cpu_online(cpu))
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc)
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc)
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
		}
		break;
	default:
		break;
	}
out:
	unlock_cpu_hotplug();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
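/*
 * Sysfs usage sketch: writing "0" or "1" to
 * /sys/devices/system/cpu/cpuN/configure deconfigures or configures the
 * (offline) cpu N via sclp.
 */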
static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
static ssize_t show_idle_count(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	idle_count = idle->idle_count;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct sys_device *dev, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long new_time;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	if (idle->in_idle) {
		new_time = get_clock();
		idle->idle_time += new_time - idle->idle_enter;
		idle->idle_enter = new_time;
	}
	new_time = idle->idle_time;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", new_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
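/*
 * The shift by 12 converts TOD clock units to microseconds: bit 51 of the
 * TOD clock (2^12 in these units) ticks once per microsecond, matching the
 * attribute name idle_time_us.
 */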
static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		spin_lock_irq(&idle->lock);
		idle->idle_enter = 0;
		idle->idle_time = 0;
		idle->idle_count = 0;
		spin_unlock_irq(&idle->lock);
		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t rescan_store(struct sys_device *dev, const char *buf,
			    size_t count)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	mutex_lock(&smp_cpu_state_mutex);
	lock_cpu_hotplug();
	newcpus = cpu_present_map;
	rc = smp_rescan_cpus();
	if (rc)
		goto out;
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	unlock_cpu_hotplug();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc ? rc : count;
}
static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
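/*
 * Usage sketch: "echo 1 > /sys/devices/system/cpu/rescan" (any write
 * triggers it, the value is ignored) detects newly available cpus and adds
 * them to cpu_present_map.
 */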
static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
			       &attr_rescan.attr);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);