1 /* smp.c: Sparc64 SMP support.
2  *
3  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4  */
5
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/sched.h>
9 #include <linux/mm.h>
10 #include <linux/pagemap.h>
11 #include <linux/threads.h>
12 #include <linux/smp.h>
13 #include <linux/smp_lock.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/delay.h>
17 #include <linux/init.h>
18 #include <linux/spinlock.h>
19 #include <linux/fs.h>
20 #include <linux/seq_file.h>
21 #include <linux/cache.h>
22 #include <linux/jiffies.h>
23 #include <linux/profile.h>
24 #include <linux/bootmem.h>
25
26 #include <asm/head.h>
27 #include <asm/ptrace.h>
28 #include <asm/atomic.h>
29 #include <asm/tlbflush.h>
30 #include <asm/mmu_context.h>
31 #include <asm/cpudata.h>
32
33 #include <asm/irq.h>
34 #include <asm/page.h>
35 #include <asm/pgtable.h>
36 #include <asm/oplib.h>
37 #include <asm/uaccess.h>
38 #include <asm/timer.h>
39 #include <asm/starfire.h>
40 #include <asm/tlb.h>
41 #include <asm/sections.h>
42
43 extern void calibrate_delay(void);
44
45 /* Please don't make this stuff initdata!!!  --DaveM */
46 static unsigned char boot_cpu_id;
47
48 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
49 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
50 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
51         { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
52 static cpumask_t smp_commenced_mask;
53 static cpumask_t cpu_callout_map;
54
55 void smp_info(struct seq_file *m)
56 {
57         int i;
58         
59         seq_printf(m, "State:\n");
60         for_each_online_cpu(i)
61                 seq_printf(m, "CPU%d:\t\tonline\n", i);
62 }
63
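/* A quick reading of the arithmetic below: udelay_val is loops_per_jiffy
 * (set in smp_store_cpu_info()), so udelay_val / (500000/HZ) is the integer
 * part of the classic BogoMIPS value (loops_per_jiffy * HZ / 500000) and the
 * second expression recovers its two fractional digits.
 */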
64 void smp_bogo(struct seq_file *m)
65 {
66         int i;
67         
68         for_each_online_cpu(i)
69                 seq_printf(m,
70                            "Cpu%dBogo\t: %lu.%02lu\n"
71                            "Cpu%dClkTck\t: %016lx\n",
72                            i, cpu_data(i).udelay_val / (500000/HZ),
73                            (cpu_data(i).udelay_val / (5000/HZ)) % 100,
74                            i, cpu_data(i).clock_tick);
75 }
76
77 void __init smp_store_cpu_info(int id)
78 {
79         int cpu_node, def;
80
81         /* multiplier and counter set by
82            smp_setup_percpu_timer()  */
83         cpu_data(id).udelay_val                 = loops_per_jiffy;
84
85         cpu_find_by_mid(id, &cpu_node);
86         cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
87                                                      "clock-frequency", 0);
88
89         def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
90         cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
91                                                       def);
92
93         def = 32;
94         cpu_data(id).dcache_line_size =
95                 prom_getintdefault(cpu_node, "dcache-line-size", def);
96
97         def = 16 * 1024;
98         cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
99                                                       def);
100
101         def = 32;
102         cpu_data(id).icache_line_size =
103                 prom_getintdefault(cpu_node, "icache-line-size", def);
104
105         def = ((tlb_type == hypervisor) ?
106                (3 * 1024 * 1024) :
107                (4 * 1024 * 1024));
108         cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
109                                                       def);
110
111         def = 64;
112         cpu_data(id).ecache_line_size =
113                 prom_getintdefault(cpu_node, "ecache-line-size", def);
114
115         printk("CPU[%d]: Caches "
116                "D[sz(%d):line_sz(%d)] "
117                "I[sz(%d):line_sz(%d)] "
118                "E[sz(%d):line_sz(%d)]\n",
119                id,
120                cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
121                cpu_data(id).icache_size, cpu_data(id).icache_line_size,
122                cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
123 }
124
125 static void smp_setup_percpu_timer(void);
126
127 static volatile unsigned long callin_flag = 0;
128
129 void __init smp_callin(void)
130 {
131         int cpuid = hard_smp_processor_id();
132
133         __local_per_cpu_offset = __per_cpu_offset(cpuid);
134
135         if (tlb_type == hypervisor)
136                 sun4v_ktsb_register();
137
138         __flush_tlb_all();
139
140         smp_setup_percpu_timer();
141
142         if (cheetah_pcache_forced_on)
143                 cheetah_enable_pcache();
144
145         local_irq_enable();
146
147         calibrate_delay();
148         smp_store_cpu_info(cpuid);
149         callin_flag = 1;
150         __asm__ __volatile__("membar #Sync\n\t"
151                              "flush  %%g6" : : : "memory");
152
153         /* Clear this or we will die instantly when we
154          * schedule back to this idler...
155          */
156         current_thread_info()->new_child = 0;
157
158         /* Attach to the address space of init_task. */
159         atomic_inc(&init_mm.mm_count);
160         current->active_mm = &init_mm;
161
162         while (!cpu_isset(cpuid, smp_commenced_mask))
163                 rmb();
164
165         cpu_set(cpuid, cpu_online_map);
166
167         /* idle thread is expected to have preempt disabled */
168         preempt_disable();
169 }
170
171 void cpu_panic(void)
172 {
173         printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
174         panic("SMP bolixed\n");
175 }
176
177 static unsigned long current_tick_offset __read_mostly;
178
179 /* This tick register synchronization scheme is taken entirely from
180  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
181  *
182  * The only change I've made is to rework it so that the master
183  * initiates the synchronization instead of the slave. -DaveM
184  */
185
186 #define MASTER  0
187 #define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))
188
189 #define NUM_ROUNDS      64      /* magic value */
190 #define NUM_ITERS       5       /* likewise */
191
192 static DEFINE_SPINLOCK(itc_sync_lock);
193 static unsigned long go[SLAVE + 1];
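/* go[] implements the handshake between the two sides: the slave raises
 * go[MASTER] to request a timestamp and the master answers by depositing
 * its %tick value in go[SLAVE].  SLAVE is defined so that the two
 * busy-waited words sit SMP_CACHE_BYTES apart, i.e. in separate cache
 * lines.
 */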
194
195 #define DEBUG_TICK_SYNC 0
196
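/* One measurement pass as seen from the slave: *rt receives the best
 * observed round-trip time, *master the master's timestamp (measured from
 * the slave's send tick) for that round, and the return value is the
 * midpoint of the slave's send/receive ticks minus the master's timestamp,
 * i.e. the slave's estimated offset from the master.
 */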
197 static inline long get_delta (long *rt, long *master)
198 {
199         unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
200         unsigned long tcenter, t0, t1, tm;
201         unsigned long i;
202
203         for (i = 0; i < NUM_ITERS; i++) {
204                 t0 = tick_ops->get_tick();
205                 go[MASTER] = 1;
206                 membar_storeload();
207                 while (!(tm = go[SLAVE]))
208                         rmb();
209                 go[SLAVE] = 0;
210                 wmb();
211                 t1 = tick_ops->get_tick();
212
213                 if (t1 - t0 < best_t1 - best_t0)
214                         best_t0 = t0, best_t1 = t1, best_tm = tm;
215         }
216
217         *rt = best_t1 - best_t0;
218         *master = best_tm - best_t0;
219
220         /* average best_t0 and best_t1 without overflow: */
221         tcenter = (best_t0/2 + best_t1/2);
222         if (best_t0 % 2 + best_t1 % 2 == 2)
223                 tcenter++;
224         return tcenter - best_tm;
225 }
226
227 void smp_synchronize_tick_client(void)
228 {
229         long i, delta, adj, adjust_latency = 0, done = 0;
230         unsigned long flags, rt, master_time_stamp, bound;
231 #if DEBUG_TICK_SYNC
232         struct {
233                 long rt;        /* roundtrip time */
234                 long master;    /* master's timestamp */
235                 long diff;      /* difference between midpoint and master's timestamp */
236                 long lat;       /* estimate of itc adjustment latency */
237         } t[NUM_ROUNDS];
238 #endif
239
240         go[MASTER] = 1;
241
242         while (go[MASTER])
243                 rmb();
244
245         local_irq_save(flags);
246         {
247                 for (i = 0; i < NUM_ROUNDS; i++) {
248                         delta = get_delta(&rt, &master_time_stamp);
249                         if (delta == 0) {
250                                 done = 1;       /* let's lock on to this... */
251                                 bound = rt;
252                         }
253
254                         if (!done) {
255                                 if (i > 0) {
256                                         adjust_latency += -delta;
257                                         adj = -delta + adjust_latency/4;
258                                 } else
259                                         adj = -delta;
260
261                                 tick_ops->add_tick(adj, current_tick_offset);
262                         }
263 #if DEBUG_TICK_SYNC
264                         t[i].rt = rt;
265                         t[i].master = master_time_stamp;
266                         t[i].diff = delta;
267                         t[i].lat = adjust_latency/4;
268 #endif
269                 }
270         }
271         local_irq_restore(flags);
272
273 #if DEBUG_TICK_SYNC
274         for (i = 0; i < NUM_ROUNDS; i++)
275                 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
276                        t[i].rt, t[i].master, t[i].diff, t[i].lat);
277 #endif
278
279         printk(KERN_INFO "CPU %d: synchronized TICK with master CPU (last diff %ld cycles, "
280                "maxerr %lu cycles)\n", smp_processor_id(), delta, rt);
281 }
282
283 static void smp_start_sync_tick_client(int cpu);
284
285 static void smp_synchronize_one_tick(int cpu)
286 {
287         unsigned long flags, i;
288
289         go[MASTER] = 0;
290
291         smp_start_sync_tick_client(cpu);
292
293         /* wait for client to be ready */
294         while (!go[MASTER])
295                 rmb();
296
297         /* now let the client proceed into his loop */
298         go[MASTER] = 0;
299         membar_storeload();
300
301         spin_lock_irqsave(&itc_sync_lock, flags);
302         {
303                 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
304                         while (!go[MASTER])
305                                 rmb();
306                         go[MASTER] = 0;
307                         wmb();
308                         go[SLAVE] = tick_ops->get_tick();
309                         membar_storeload();
310                 }
311         }
312         spin_unlock_irqrestore(&itc_sync_lock, flags);
313 }
314
315 extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
316
317 extern unsigned long sparc64_cpu_startup;
318
319 /* The OBP cpu startup callback truncates the 3rd arg cookie to
320  * 32-bits (I think) so to be safe we have it read the pointer
321  * contained here so we work on >4GB machines. -DaveM
322  */
323 static struct thread_info *cpu_new_thread = NULL;
324
325 static int __devinit smp_boot_one_cpu(unsigned int cpu)
326 {
327         unsigned long entry =
328                 (unsigned long)(&sparc64_cpu_startup);
329         unsigned long cookie =
330                 (unsigned long)(&cpu_new_thread);
331         struct task_struct *p;
332         int timeout, ret;
333
334         p = fork_idle(cpu);
335         callin_flag = 0;
336         cpu_new_thread = task_thread_info(p);
337         cpu_set(cpu, cpu_callout_map);
338
339         if (tlb_type == hypervisor) {
340                 /* Alloc the mondo queues, cpu will load them.  */
341                 sun4v_init_mondo_queues(0, cpu, 1, 0);
342
343                 prom_startcpu_cpuid(cpu, entry, cookie);
344         } else {
345                 int cpu_node;
346
347                 cpu_find_by_mid(cpu, &cpu_node);
348                 prom_startcpu(cpu_node, entry, cookie);
349         }
350
351         for (timeout = 0; timeout < 5000000; timeout++) {
352                 if (callin_flag)
353                         break;
354                 udelay(100);
355         }
356
357         if (callin_flag) {
358                 ret = 0;
359         } else {
360                 printk("Processor %d is stuck.\n", cpu);
361                 cpu_clear(cpu, cpu_callout_map);
362                 ret = -ENODEV;
363         }
364         cpu_new_thread = NULL;
365
366         return ret;
367 }
368
369 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
370 {
371         u64 result, target;
372         int stuck, tmp;
373
374         if (this_is_starfire) {
375                 /* map to real upaid */
376                 cpu = (((cpu & 0x3c) << 1) |
377                         ((cpu & 0x40) >> 4) |
378                         (cpu & 0x3));
379         }
380
381         target = (cpu << 14) | 0x70;
382 again:
383         /* Ok, this is the real Spitfire Errata #54.
384          * One must read back from a UDB internal register
385          * after writes to the UDB interrupt dispatch, but
386          * before the membar Sync for that write.
387          * So we use the high UDB control register (ASI 0x7f,
388          * ADDR 0x20) for the dummy read. -DaveM
389          */
390         tmp = 0x40;
391         __asm__ __volatile__(
392         "wrpr   %1, %2, %%pstate\n\t"
393         "stxa   %4, [%0] %3\n\t"
394         "stxa   %5, [%0+%8] %3\n\t"
395         "add    %0, %8, %0\n\t"
396         "stxa   %6, [%0+%8] %3\n\t"
397         "membar #Sync\n\t"
398         "stxa   %%g0, [%7] %3\n\t"
399         "membar #Sync\n\t"
400         "mov    0x20, %%g1\n\t"
401         "ldxa   [%%g1] 0x7f, %%g0\n\t"
402         "membar #Sync"
403         : "=r" (tmp)
404         : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
405           "r" (data0), "r" (data1), "r" (data2), "r" (target),
406           "r" (0x10), "0" (tmp)
407         : "g1");
408
409         /* NOTE: PSTATE_IE is still clear. */
410         stuck = 100000;
411         do {
412                 __asm__ __volatile__("ldxa [%%g0] %1, %0"
413                         : "=r" (result)
414                         : "i" (ASI_INTR_DISPATCH_STAT));
415                 if (result == 0) {
416                         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
417                                              : : "r" (pstate));
418                         return;
419                 }
420                 stuck -= 1;
421                 if (stuck == 0)
422                         break;
423         } while (result & 0x1);
424         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
425                              : : "r" (pstate));
426         if (stuck == 0) {
427                 printk("CPU[%d]: mondo stuckage result[%016lx]\n",
428                        smp_processor_id(), result);
429         } else {
430                 udelay(2);
431                 goto again;
432         }
433 }
434
435 static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
436 {
437         u64 pstate;
438         int i;
439
440         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
441         for_each_cpu_mask(i, mask)
442                 spitfire_xcall_helper(data0, data1, data2, pstate, i);
443 }
444
445 /* Cheetah now allows sending the whole 64 bytes of data in the interrupt
446  * packet, but we have no use for that.  However we do take advantage of
447  * the new pipelining feature (i.e. dispatch to multiple cpus simultaneously).
448  */
449 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
450 {
451         u64 pstate, ver;
452         int nack_busy_id, is_jbus;
453
454         if (cpus_empty(mask))
455                 return;
456
457         /* Unfortunately, someone at Sun had the brilliant idea to make the
458          * busy/nack fields hard-coded by ITID number for this Ultra-III
459          * derivative processor.
460          */
461         __asm__ ("rdpr %%ver, %0" : "=r" (ver));
462         is_jbus = ((ver >> 32) == __JALAPENO_ID ||
463                    (ver >> 32) == __SERRANO_ID);
464
465         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
466
467 retry:
468         __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
469                              : : "r" (pstate), "i" (PSTATE_IE));
470
471         /* Setup the dispatch data registers. */
472         __asm__ __volatile__("stxa      %0, [%3] %6\n\t"
473                              "stxa      %1, [%4] %6\n\t"
474                              "stxa      %2, [%5] %6\n\t"
475                              "membar    #Sync\n\t"
476                              : /* no outputs */
477                              : "r" (data0), "r" (data1), "r" (data2),
478                                "r" (0x40), "r" (0x50), "r" (0x60),
479                                "i" (ASI_INTR_W));
480
481         nack_busy_id = 0;
482         {
483                 int i;
484
485                 for_each_cpu_mask(i, mask) {
486                         u64 target = (i << 14) | 0x70;
487
488                         if (!is_jbus)
489                                 target |= (nack_busy_id << 24);
490                         __asm__ __volatile__(
491                                 "stxa   %%g0, [%0] %1\n\t"
492                                 "membar #Sync\n\t"
493                                 : /* no outputs */
494                                 : "r" (target), "i" (ASI_INTR_W));
495                         nack_busy_id++;
496                 }
497         }
498
499         /* Now, poll for completion. */
500         {
501                 u64 dispatch_stat;
502                 long stuck;
503
504                 stuck = 100000 * nack_busy_id;
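                /* Each dispatched target owns a busy/nack bit pair in the
                 * dispatch status register: the even bits (0x5555...) are
                 * the BUSY bits we spin on below, the odd bits are the
                 * NACKs examined afterwards.
                 */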
505                 do {
506                         __asm__ __volatile__("ldxa      [%%g0] %1, %0"
507                                              : "=r" (dispatch_stat)
508                                              : "i" (ASI_INTR_DISPATCH_STAT));
509                         if (dispatch_stat == 0UL) {
510                                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
511                                                      : : "r" (pstate));
512                                 return;
513                         }
514                         if (!--stuck)
515                                 break;
516                 } while (dispatch_stat & 0x5555555555555555UL);
517
518                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
519                                      : : "r" (pstate));
520
521                 if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
522                         /* Busy bits will not clear, continue instead
523                          * of freezing up on this cpu.
524                          */
525                         printk("CPU[%d]: mondo stuckage result[%016lx]\n",
526                                smp_processor_id(), dispatch_stat);
527                 } else {
528                         int i, this_busy_nack = 0;
529
530                         /* Delay some random time with interrupts enabled
531                          * to prevent deadlock.
532                          */
533                         udelay(2 * nack_busy_id);
534
535                         /* Clear out the mask bits for cpus which did not
536                          * NACK us.
537                          */
538                         for_each_cpu_mask(i, mask) {
539                                 u64 check_mask;
540
541                                 if (is_jbus)
542                                         check_mask = (0x2UL << (2*i));
543                                 else
544                                         check_mask = (0x2UL <<
545                                                       this_busy_nack);
546                                 if ((dispatch_stat & check_mask) == 0)
547                                         cpu_clear(i, mask);
548                                 this_busy_nack += 2;
549                         }
550
551                         goto retry;
552                 }
553         }
554 }
555
556 /* Multi-cpu list version.  */
557 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
558 {
559         struct trap_per_cpu *tb;
560         u16 *cpu_list;
561         u64 *mondo;
562         cpumask_t error_mask;
563         unsigned long flags, status;
564         int cnt, retries, this_cpu, prev_sent, i;
565
566         /* We have to do this whole thing with interrupts fully disabled.
567          * Otherwise if we send an xcall from interrupt context it will
568          * corrupt both our mondo block and cpu list state.
569          *
570          * One consequence of this is that we cannot use timeout mechanisms
571          * that depend upon interrupts being delivered locally.  So, for
572          * example, we cannot sample jiffies and expect it to advance.
573          *
574          * Fortunately, udelay() uses %stick/%tick so we can use that.
575          */
576         local_irq_save(flags);
577
578         this_cpu = smp_processor_id();
579         tb = &trap_block[this_cpu];
580
581         mondo = __va(tb->cpu_mondo_block_pa);
582         mondo[0] = data0;
583         mondo[1] = data1;
584         mondo[2] = data2;
585         wmb();
586
587         cpu_list = __va(tb->cpu_list_pa);
588
589         /* Setup the initial cpu list.  */
590         cnt = 0;
591         for_each_cpu_mask(i, mask)
592                 cpu_list[cnt++] = i;
593
594         cpus_clear(error_mask);
595         retries = 0;
596         prev_sent = 0;
597         do {
598                 int forward_progress, n_sent;
599
600                 status = sun4v_cpu_mondo_send(cnt,
601                                               tb->cpu_list_pa,
602                                               tb->cpu_mondo_block_pa);
603
604                 /* HV_EOK means all cpus received the xcall, we're done.  */
605                 if (likely(status == HV_EOK))
606                         break;
607
608                 /* First, see if we made any forward progress.
609                  *
610                  * The hypervisor indicates successful sends by setting
611                  * cpu list entries to the value 0xffff.
612                  */
613                 n_sent = 0;
614                 for (i = 0; i < cnt; i++) {
615                         if (likely(cpu_list[i] == 0xffff))
616                                 n_sent++;
617                 }
618
619                 forward_progress = 0;
620                 if (n_sent > prev_sent)
621                         forward_progress = 1;
622
623                 prev_sent = n_sent;
624
625                 /* If we get a HV_ECPUERROR, then one or more of the cpus
626                  * in the list are in error state.  Use the cpu_state()
627                  * hypervisor call to find out which cpus are in error state.
628                  */
629                 if (unlikely(status == HV_ECPUERROR)) {
630                         for (i = 0; i < cnt; i++) {
631                                 long err;
632                                 u16 cpu;
633
634                                 cpu = cpu_list[i];
635                                 if (cpu == 0xffff)
636                                         continue;
637
638                                 err = sun4v_cpu_state(cpu);
639                                 if (err >= 0 &&
640                                     err == HV_CPU_STATE_ERROR) {
641                                         cpu_list[i] = 0xffff;
642                                         cpu_set(cpu, error_mask);
643                                 }
644                         }
645                 } else if (unlikely(status != HV_EWOULDBLOCK))
646                         goto fatal_mondo_error;
647
648                 /* Don't bother rewriting the CPU list, just leave the
649                  * 0xffff and non-0xffff entries in there and the
650                  * hypervisor will do the right thing.
651                  *
652                  * Only advance timeout state if we didn't make any
653                  * forward progress.
654                  */
655                 if (unlikely(!forward_progress)) {
656                         if (unlikely(++retries > 10000))
657                                 goto fatal_mondo_timeout;
658
659                         /* Delay a little bit to let other cpus catch up
660                          * on their cpu mondo queue work.
661                          */
662                         udelay(2 * cnt);
663                 }
664         } while (1);
665
666         local_irq_restore(flags);
667
668         if (unlikely(!cpus_empty(error_mask)))
669                 goto fatal_mondo_cpu_error;
670
671         return;
672
673 fatal_mondo_cpu_error:
674         printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
675                "were in error state\n",
676                this_cpu);
677         printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
678         for_each_cpu_mask(i, error_mask)
679                 printk("%d ", i);
680         printk("]\n");
681         return;
682
683 fatal_mondo_timeout:
684         local_irq_restore(flags);
685         printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
686                "progress after %d retries.\n",
687                this_cpu, retries);
688         goto dump_cpu_list_and_out;
689
690 fatal_mondo_error:
691         local_irq_restore(flags);
692         printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
693                this_cpu, status);
694         printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
695                "mondo_block_pa(%lx)\n",
696                this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
697
698 dump_cpu_list_and_out:
699         printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
700         for (i = 0; i < cnt; i++)
701                 printk("%u ", cpu_list[i]);
702         printk("]\n");
703 }
704
705 /* Send cross call to all processors mentioned in MASK
706  * except self.
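 *
 * The first mondo word (data0) packs 'ctx' into the upper 32 bits and the
 * low 32 bits of the handler address 'func' into the lower half.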
707  */
708 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
709 {
710         u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
711         int this_cpu = get_cpu();
712
713         cpus_and(mask, mask, cpu_online_map);
714         cpu_clear(this_cpu, mask);
715
716         if (tlb_type == spitfire)
717                 spitfire_xcall_deliver(data0, data1, data2, mask);
718         else if (tlb_type == cheetah || tlb_type == cheetah_plus)
719                 cheetah_xcall_deliver(data0, data1, data2, mask);
720         else
721                 hypervisor_xcall_deliver(data0, data1, data2, mask);
722         /* NOTE: Caller runs local copy on master. */
723
724         put_cpu();
725 }
726
727 extern unsigned long xcall_sync_tick;
728
729 static void smp_start_sync_tick_client(int cpu)
730 {
731         cpumask_t mask = cpumask_of_cpu(cpu);
732
733         smp_cross_call_masked(&xcall_sync_tick,
734                               0, 0, 0, mask);
735 }
736
737 /* Send cross call to all processors except self. */
738 #define smp_cross_call(func, ctx, data1, data2) \
739         smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)
740
741 struct call_data_struct {
742         void (*func) (void *info);
743         void *info;
744         atomic_t finished;
745         int wait;
746 };
747
748 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
749 static struct call_data_struct *call_data;
750
751 extern unsigned long xcall_call_function;
752
753 /**
754  * smp_call_function(): Run a function on all other CPUs.
755  * @func: The function to run. This must be fast and non-blocking.
756  * @info: An arbitrary pointer to pass to the function.
757  * @nonatomic: currently unused.
758  * @wait: If true, wait (atomically) until function has completed on other CPUs.
759  *
760  * Returns 0 on success, else a negative status code. Does not return until
761  * remote CPUs are nearly ready to execute <<func>>, are executing it, or have executed it.
762  *
763  * You must not call this function with disabled interrupts or from a
764  * hardware interrupt handler or from a bottom half handler.
765  */
766 static int smp_call_function_mask(void (*func)(void *info), void *info,
767                                   int nonatomic, int wait, cpumask_t mask)
768 {
769         struct call_data_struct data;
770         int cpus;
771
772         /* Can deadlock when called with interrupts disabled */
773         WARN_ON(irqs_disabled());
774
775         data.func = func;
776         data.info = info;
777         atomic_set(&data.finished, 0);
778         data.wait = wait;
779
780         spin_lock(&call_lock);
781
782         cpu_clear(smp_processor_id(), mask);
783         cpus = cpus_weight(mask);
784         if (!cpus)
785                 goto out_unlock;
786
787         call_data = &data;
788         mb();
789
790         smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
791
792         /* Wait for response */
793         while (atomic_read(&data.finished) != cpus)
794                 cpu_relax();
795
796 out_unlock:
797         spin_unlock(&call_lock);
798
799         return 0;
800 }
801
802 int smp_call_function(void (*func)(void *info), void *info,
803                       int nonatomic, int wait)
804 {
805         return smp_call_function_mask(func, info, nonatomic, wait,
806                                       cpu_online_map);
807 }
808
809 void smp_call_function_client(int irq, struct pt_regs *regs)
810 {
811         void (*func) (void *info) = call_data->func;
812         void *info = call_data->info;
813
814         clear_softint(1 << irq);
815         if (call_data->wait) {
816                 /* let initiator proceed only after completion */
817                 func(info);
818                 atomic_inc(&call_data->finished);
819         } else {
820                 /* let initiator proceed after getting data */
821                 atomic_inc(&call_data->finished);
822                 func(info);
823         }
824 }
825
826 static void tsb_sync(void *info)
827 {
828         struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
829         struct mm_struct *mm = info;
830
831         /* It is not valid to test "current->active_mm == mm" here.
832          *
833          * The value of "current" is not changed atomically with
834          * switch_mm().  But that's OK, we just need to check the
835          * current cpu's trap block PGD physical address.
836          */
837         if (tp->pgd_paddr == __pa(mm->pgd))
838                 tsb_context_switch(mm);
839 }
840
841 void smp_tsb_sync(struct mm_struct *mm)
842 {
843         smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
844 }
845
846 extern unsigned long xcall_flush_tlb_mm;
847 extern unsigned long xcall_flush_tlb_pending;
848 extern unsigned long xcall_flush_tlb_kernel_range;
849 extern unsigned long xcall_report_regs;
850 extern unsigned long xcall_receive_signal;
851 extern unsigned long xcall_new_mmu_context_version;
852
853 #ifdef DCACHE_ALIASING_POSSIBLE
854 extern unsigned long xcall_flush_dcache_page_cheetah;
855 #endif
856 extern unsigned long xcall_flush_dcache_page_spitfire;
857
858 #ifdef CONFIG_DEBUG_DCFLUSH
859 extern atomic_t dcpage_flushes;
860 extern atomic_t dcpage_flushes_xcall;
861 #endif
862
863 static __inline__ void __local_flush_dcache_page(struct page *page)
864 {
865 #ifdef DCACHE_ALIASING_POSSIBLE
866         __flush_dcache_page(page_address(page),
867                             ((tlb_type == spitfire) &&
868                              page_mapping(page) != NULL));
869 #else
870         if (page_mapping(page) != NULL &&
871             tlb_type == spitfire)
872                 __flush_icache_page(__pa(page_address(page)));
873 #endif
874 }
875
876 void smp_flush_dcache_page_impl(struct page *page, int cpu)
877 {
878         cpumask_t mask = cpumask_of_cpu(cpu);
879         int this_cpu;
880
881         if (tlb_type == hypervisor)
882                 return;
883
884 #ifdef CONFIG_DEBUG_DCFLUSH
885         atomic_inc(&dcpage_flushes);
886 #endif
887
888         this_cpu = get_cpu();
889
890         if (cpu == this_cpu) {
891                 __local_flush_dcache_page(page);
892         } else if (cpu_online(cpu)) {
893                 void *pg_addr = page_address(page);
894                 u64 data0;
895
896                 if (tlb_type == spitfire) {
897                         data0 =
898                                 ((u64)&xcall_flush_dcache_page_spitfire);
899                         if (page_mapping(page) != NULL)
900                                 data0 |= ((u64)1 << 32);
901                         spitfire_xcall_deliver(data0,
902                                                __pa(pg_addr),
903                                                (u64) pg_addr,
904                                                mask);
905                 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
906 #ifdef DCACHE_ALIASING_POSSIBLE
907                         data0 =
908                                 ((u64)&xcall_flush_dcache_page_cheetah);
909                         cheetah_xcall_deliver(data0,
910                                               __pa(pg_addr),
911                                               0, mask);
912 #endif
913                 }
914 #ifdef CONFIG_DEBUG_DCFLUSH
915                 atomic_inc(&dcpage_flushes_xcall);
916 #endif
917         }
918
919         put_cpu();
920 }
921
922 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
923 {
924         void *pg_addr = page_address(page);
925         cpumask_t mask = cpu_online_map;
926         u64 data0;
927         int this_cpu;
928
929         if (tlb_type == hypervisor)
930                 return;
931
932         this_cpu = get_cpu();
933
934         cpu_clear(this_cpu, mask);
935
936 #ifdef CONFIG_DEBUG_DCFLUSH
937         atomic_inc(&dcpage_flushes);
938 #endif
939         if (cpus_empty(mask))
940                 goto flush_self;
941         if (tlb_type == spitfire) {
942                 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
943                 if (page_mapping(page) != NULL)
944                         data0 |= ((u64)1 << 32);
945                 spitfire_xcall_deliver(data0,
946                                        __pa(pg_addr),
947                                        (u64) pg_addr,
948                                        mask);
949         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
950 #ifdef DCACHE_ALIASING_POSSIBLE
951                 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
952                 cheetah_xcall_deliver(data0,
953                                       __pa(pg_addr),
954                                       0, mask);
955 #endif
956         }
957 #ifdef CONFIG_DEBUG_DCFLUSH
958         atomic_inc(&dcpage_flushes_xcall);
959 #endif
960  flush_self:
961         __local_flush_dcache_page(page);
962
963         put_cpu();
964 }
965
966 static void __smp_receive_signal_mask(cpumask_t mask)
967 {
968         smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
969 }
970
971 void smp_receive_signal(int cpu)
972 {
973         cpumask_t mask = cpumask_of_cpu(cpu);
974
975         if (cpu_online(cpu))
976                 __smp_receive_signal_mask(mask);
977 }
978
979 void smp_receive_signal_client(int irq, struct pt_regs *regs)
980 {
981         clear_softint(1 << irq);
982 }
983
984 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
985 {
986         struct mm_struct *mm;
987         unsigned long flags;
988
989         clear_softint(1 << irq);
990
991         /* See if we need to allocate a new TLB context because
992          * the version of the one we are using is now out of date.
993          */
994         mm = current->active_mm;
995         if (unlikely(!mm || (mm == &init_mm)))
996                 return;
997
998         spin_lock_irqsave(&mm->context.lock, flags);
999
1000         if (unlikely(!CTX_VALID(mm->context)))
1001                 get_new_mmu_context(mm);
1002
1003         spin_unlock_irqrestore(&mm->context.lock, flags);
1004
1005         load_secondary_context(mm);
1006         __flush_tlb_mm(CTX_HWBITS(mm->context),
1007                        SECONDARY_CONTEXT);
1008 }
1009
1010 void smp_new_mmu_context_version(void)
1011 {
1012         smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
1013 }
1014
1015 void smp_report_regs(void)
1016 {
1017         smp_cross_call(&xcall_report_regs, 0, 0, 0);
1018 }
1019
1020 /* We know that the window frames of the user have been flushed
1021  * to the stack before we get here because all callers of us
1022  * are flush_tlb_*() routines, and these run after flush_cache_*()
1023  * which performs the flushw.
1024  *
1025  * The SMP TLB coherency scheme we use works as follows:
1026  *
1027  * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1028  *    space has (potentially) executed on, this is the heuristic
1029  *    we use to avoid doing cross calls.
1030  *
1031  *    Also, for flushing from kswapd and also for clones, we
1032  *    use cpu_vm_mask as the list of cpus to make run the TLB.
1033  *
1034  * 2) TLB context numbers are shared globally across all processors
1035  *    in the system, this allows us to play several games to avoid
1036  *    cross calls.
1037  *
1038  *    One invariant is that when a cpu switches to a process, and
1039  *    that process's tsk->active_mm->cpu_vm_mask does not have the
1040  *    current cpu's bit set, that tlb context is flushed locally.
1041  *
1042  *    If the address space is non-shared (i.e. mm->count == 1) we avoid
1043  *    cross calls when we want to flush the currently running process's
1044  *    tlb state.  This is done by clearing all cpu bits except the current
1045  *    processor's in current->active_mm->cpu_vm_mask and performing the
1046  *    flush locally only.  This will force any subsequent cpus which run
1047  *    this task to flush the context from the local tlb if the process
1048  *    migrates to another cpu (again).
1049  *
1050  * 3) For shared address spaces (threads) and swapping we bite the
1051  *    bullet for most cases and perform the cross call (but only to
1052  *    the cpus listed in cpu_vm_mask).
1053  *
1054  *    The performance gain from "optimizing" away the cross call for threads is
1055  *    questionable (in theory the big win for threads is the massive sharing of
1056  *    address space state across processors).
1057  */
1058
1059 /* This currently is only used by the hugetlb arch pre-fault
1060  * hook on UltraSPARC-III+ and later when changing the pagesize
1061  * bits of the context register for an address space.
1062  */
1063 void smp_flush_tlb_mm(struct mm_struct *mm)
1064 {
1065         u32 ctx = CTX_HWBITS(mm->context);
1066         int cpu = get_cpu();
1067
1068         if (atomic_read(&mm->mm_users) == 1) {
1069                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1070                 goto local_flush_and_out;
1071         }
1072
1073         smp_cross_call_masked(&xcall_flush_tlb_mm,
1074                               ctx, 0, 0,
1075                               mm->cpu_vm_mask);
1076
1077 local_flush_and_out:
1078         __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1079
1080         put_cpu();
1081 }
1082
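/* Flush a batch of 'nr' pending virtual addresses for 'mm', cross calling
 * only when the address space may be live on other cpus (see the scheme
 * described above).
 */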
1083 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1084 {
1085         u32 ctx = CTX_HWBITS(mm->context);
1086         int cpu = get_cpu();
1087
1088         if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
1089                 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
1090         else
1091                 smp_cross_call_masked(&xcall_flush_tlb_pending,
1092                                       ctx, nr, (unsigned long) vaddrs,
1093                                       mm->cpu_vm_mask);
1094
1095         __flush_tlb_pending(ctx, nr, vaddrs);
1096
1097         put_cpu();
1098 }
1099
1100 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1101 {
1102         start &= PAGE_MASK;
1103         end    = PAGE_ALIGN(end);
1104         if (start != end) {
1105                 smp_cross_call(&xcall_flush_tlb_kernel_range,
1106                                0, start, end);
1107
1108                 __flush_tlb_kernel_range(start, end);
1109         }
1110 }
1111
1112 /* CPU capture. */
1113 /* #define CAPTURE_DEBUG */
1114 extern unsigned long xcall_capture;
1115
1116 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1117 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1118 static unsigned long penguins_are_doing_time;
1119
1120 void smp_capture(void)
1121 {
1122         int result = atomic_add_ret(1, &smp_capture_depth);
1123
1124         if (result == 1) {
1125                 int ncpus = num_online_cpus();
1126
1127 #ifdef CAPTURE_DEBUG
1128                 printk("CPU[%d]: Sending penguins to jail...",
1129                        smp_processor_id());
1130 #endif
1131                 penguins_are_doing_time = 1;
1132                 membar_storestore_loadstore();
1133                 atomic_inc(&smp_capture_registry);
1134                 smp_cross_call(&xcall_capture, 0, 0, 0);
1135                 while (atomic_read(&smp_capture_registry) != ncpus)
1136                         rmb();
1137 #ifdef CAPTURE_DEBUG
1138                 printk("done\n");
1139 #endif
1140         }
1141 }
1142
1143 void smp_release(void)
1144 {
1145         if (atomic_dec_and_test(&smp_capture_depth)) {
1146 #ifdef CAPTURE_DEBUG
1147                 printk("CPU[%d]: Giving pardon to "
1148                        "imprisoned penguins\n",
1149                        smp_processor_id());
1150 #endif
1151                 penguins_are_doing_time = 0;
1152                 membar_storeload_storestore();
1153                 atomic_dec(&smp_capture_registry);
1154         }
1155 }
1156
1157 /* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
1158  * can service tlb flush xcalls...
1159  */
1160 extern void prom_world(int);
1161
1162 void smp_penguin_jailcell(int irq, struct pt_regs *regs)
1163 {
1164         clear_softint(1 << irq);
1165
1166         preempt_disable();
1167
1168         __asm__ __volatile__("flushw");
1169         prom_world(1);
1170         atomic_inc(&smp_capture_registry);
1171         membar_storeload_storestore();
1172         while (penguins_are_doing_time)
1173                 rmb();
1174         atomic_dec(&smp_capture_registry);
1175         prom_world(0);
1176
1177         preempt_enable();
1178 }
1179
1180 #define prof_multiplier(__cpu)          cpu_data(__cpu).multiplier
1181 #define prof_counter(__cpu)             cpu_data(__cpu).counter
1182
1183 void smp_percpu_timer_interrupt(struct pt_regs *regs)
1184 {
1185         unsigned long compare, tick, pstate;
1186         int cpu = smp_processor_id();
1187         int user = user_mode(regs);
1188
1189         /*
1190          * Check for level 14 softint.
1191          */
1192         {
1193                 unsigned long tick_mask = tick_ops->softint_mask;
1194
1195                 if (!(get_softint() & tick_mask)) {
1196                         extern void handler_irq(int, struct pt_regs *);
1197
1198                         handler_irq(14, regs);
1199                         return;
1200                 }
1201                 clear_softint(tick_mask);
1202         }
1203
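        /* Re-arm the compare register and go around again if the new
         * deadline has already passed, so that ticks delayed by long
         * interrupt-off sections are not silently dropped.
         */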
1204         do {
1205                 profile_tick(CPU_PROFILING, regs);
1206                 if (!--prof_counter(cpu)) {
1207                         irq_enter();
1208
1209                         if (cpu == boot_cpu_id) {
1210                                 kstat_this_cpu.irqs[0]++;
1211                                 timer_tick_interrupt(regs);
1212                         }
1213
1214                         update_process_times(user);
1215
1216                         irq_exit();
1217
1218                         prof_counter(cpu) = prof_multiplier(cpu);
1219                 }
1220
1221                 /* Guarantee that the following sequences execute
1222                  * uninterrupted.
1223                  */
1224                 __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
1225                                      "wrpr      %0, %1, %%pstate"
1226                                      : "=r" (pstate)
1227                                      : "i" (PSTATE_IE));
1228
1229                 compare = tick_ops->add_compare(current_tick_offset);
1230                 tick = tick_ops->get_tick();
1231
1232                 /* Restore PSTATE_IE. */
1233                 __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
1234                                      : /* no outputs */
1235                                      : "r" (pstate));
1236         } while (time_after_eq(tick, compare));
1237 }
1238
1239 static void __init smp_setup_percpu_timer(void)
1240 {
1241         int cpu = smp_processor_id();
1242         unsigned long pstate;
1243
1244         prof_counter(cpu) = prof_multiplier(cpu) = 1;
1245
1246         /* Guarantee that the following sequences execute
1247          * uninterrupted.
1248          */
1249         __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
1250                              "wrpr      %0, %1, %%pstate"
1251                              : "=r" (pstate)
1252                              : "i" (PSTATE_IE));
1253
1254         tick_ops->init_tick(current_tick_offset);
1255
1256         /* Restore PSTATE_IE. */
1257         __asm__ __volatile__("wrpr      %0, 0x0, %%pstate"
1258                              : /* no outputs */
1259                              : "r" (pstate));
1260 }
1261
1262 void __init smp_tick_init(void)
1263 {
1264         boot_cpu_id = hard_smp_processor_id();
1265         current_tick_offset = timer_tick_offset;
1266
1267         cpu_set(boot_cpu_id, cpu_online_map);
1268         prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
1269 }
1270
1271 /* /proc/profile writes can call this, don't __init it please. */
1272 static DEFINE_SPINLOCK(prof_setup_lock);
1273
1274 int setup_profiling_timer(unsigned int multiplier)
1275 {
1276         unsigned long flags;
1277         int i;
1278
1279         if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
1280                 return -EINVAL;
1281
1282         spin_lock_irqsave(&prof_setup_lock, flags);
1283         for_each_possible_cpu(i)
1284                 prof_multiplier(i) = multiplier;
1285         current_tick_offset = (timer_tick_offset / multiplier);
1286         spin_unlock_irqrestore(&prof_setup_lock, flags);
1287
1288         return 0;
1289 }
1290
1291 /* Constrain the number of cpus to max_cpus.  */
1292 void __init smp_prepare_cpus(unsigned int max_cpus)
1293 {
1294         int i;
1295
1296         if (num_possible_cpus() > max_cpus) {
1297                 int instance, mid;
1298
1299                 instance = 0;
1300                 while (!cpu_find_by_instance(instance, NULL, &mid)) {
1301                         if (mid != boot_cpu_id) {
1302                                 cpu_clear(mid, phys_cpu_present_map);
1303                                 cpu_clear(mid, cpu_present_map);
1304                                 if (num_possible_cpus() <= max_cpus)
1305                                         break;
1306                         }
1307                         instance++;
1308                 }
1309         }
1310
1311         for_each_possible_cpu(i) {
1312                 if (tlb_type == hypervisor) {
1313                         int j;
1314
1315                         /* XXX get this mapping from machine description */
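                        /* Assume four strands per core (sun4v/Niagara
                         * style): cpus that agree in all but the low two
                         * id bits are treated as siblings.
                         */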
1316                         for_each_possible_cpu(j) {
1317                                 if ((j >> 2) == (i >> 2))
1318                                         cpu_set(j, cpu_sibling_map[i]);
1319                         }
1320                 } else {
1321                         cpu_set(i, cpu_sibling_map[i]);
1322                 }
1323         }
1324
1325         smp_store_cpu_info(boot_cpu_id);
1326 }
1327
1328 /* Set this up early so that things like the scheduler can init
1329  * properly.  We use the same cpu mask for both the present and
1330  * possible cpu map.
1331  */
1332 void __init smp_setup_cpu_possible_map(void)
1333 {
1334         int instance, mid;
1335
1336         instance = 0;
1337         while (!cpu_find_by_instance(instance, NULL, &mid)) {
1338                 if (mid < NR_CPUS) {
1339                         cpu_set(mid, phys_cpu_present_map);
1340                         cpu_set(mid, cpu_present_map);
1341                 }
1342                 instance++;
1343         }
1344 }
1345
1346 void __devinit smp_prepare_boot_cpu(void)
1347 {
1348         int cpu = hard_smp_processor_id();
1349
1350         if (cpu >= NR_CPUS) {
1351                 prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
1352                 prom_halt();
1353         }
1354
1355         current_thread_info()->cpu = cpu;
1356         __local_per_cpu_offset = __per_cpu_offset(cpu);
1357
1358         cpu_set(smp_processor_id(), cpu_online_map);
1359         cpu_set(smp_processor_id(), phys_cpu_present_map);
1360 }
1361
1362 int __devinit __cpu_up(unsigned int cpu)
1363 {
1364         int ret = smp_boot_one_cpu(cpu);
1365
1366         if (!ret) {
1367                 cpu_set(cpu, smp_commenced_mask);
1368                 while (!cpu_isset(cpu, cpu_online_map))
1369                         mb();
1370                 if (!cpu_isset(cpu, cpu_online_map)) {
1371                         ret = -ENODEV;
1372                 } else {
1373                         /* On SUN4V, writes to %tick and %stick are
1374                          * not allowed.
1375                          */
1376                         if (tlb_type != hypervisor)
1377                                 smp_synchronize_one_tick(cpu);
1378                 }
1379         }
1380         return ret;
1381 }
1382
1383 void __init smp_cpus_done(unsigned int max_cpus)
1384 {
1385         unsigned long bogosum = 0;
1386         int i;
1387
1388         for_each_online_cpu(i)
1389                 bogosum += cpu_data(i).udelay_val;
1390         printk("Total of %ld processors activated "
1391                "(%lu.%02lu BogoMIPS).\n",
1392                (long) num_online_cpus(),
1393                bogosum/(500000/HZ),
1394                (bogosum/(5000/HZ))%100);
1395 }
1396
1397 void smp_send_reschedule(int cpu)
1398 {
1399         smp_receive_signal(cpu);
1400 }
1401
1402 /* This is a nop because we capture all other cpus
1403  * anyway when making the PROM active.
1404  */
1405 void smp_send_stop(void)
1406 {
1407 }
1408
1409 unsigned long __per_cpu_base __read_mostly;
1410 unsigned long __per_cpu_shift __read_mostly;
1411
1412 EXPORT_SYMBOL(__per_cpu_base);
1413 EXPORT_SYMBOL(__per_cpu_shift);
1414
1415 void __init setup_per_cpu_areas(void)
1416 {
1417         unsigned long goal, size, i;
1418         char *ptr;
1419
1420         /* Copy section for each CPU (we discard the original) */
1421         goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
1422 #ifdef CONFIG_MODULES
1423         if (goal < PERCPU_ENOUGH_ROOM)
1424                 goal = PERCPU_ENOUGH_ROOM;
1425 #endif
1426         __per_cpu_shift = 0;
1427         for (size = 1UL; size < goal; size <<= 1UL)
1428                 __per_cpu_shift++;
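        /* The per-cpu copy size is rounded up to a power of two so that a
         * cpu's area can be located with a plain shift; __per_cpu_offset()
         * (defined elsewhere) presumably computes
         * __per_cpu_base + (cpu << __per_cpu_shift).
         */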
1429
1430         ptr = alloc_bootmem(size * NR_CPUS);
1431
1432         __per_cpu_base = ptr - __per_cpu_start;
1433
1434         for (i = 0; i < NR_CPUS; i++, ptr += size)
1435                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
1436 }