arch/powerpc/platforms/powermac/smp.c
1 /*
2  * SMP support for power macintosh.
3  *
4  * We support both the old "powersurge" SMP architecture
5  * and the current Core99 (G4 PowerMac) machines.
6  *
7  * Note that we don't support the very first revision of the
8  * Apple/DayStar 2-CPU board, the one with the funky
9  * watchdog. Hopefully, none of these are out there except
10  * maybe internally at Apple. I should probably still add some
11  * code to detect this card, though, and disable SMP. --BenH.
12  *
13  * Support Macintosh G4 SMP by Troy Benjegerdes (hozer@drgw.net)
14  * and Ben Herrenschmidt <benh@kernel.crashing.org>.
15  *
16  * Support for DayStar quad CPU cards
17  * Copyright (C) XLR8, Inc. 1994-2000
18  *
19  *  This program is free software; you can redistribute it and/or
20  *  modify it under the terms of the GNU General Public License
21  *  as published by the Free Software Foundation; either version
22  *  2 of the License, or (at your option) any later version.
23  */
24 #include <linux/config.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/smp.h>
28 #include <linux/smp_lock.h>
29 #include <linux/interrupt.h>
30 #include <linux/kernel_stat.h>
31 #include <linux/delay.h>
32 #include <linux/init.h>
33 #include <linux/spinlock.h>
34 #include <linux/errno.h>
35 #include <linux/hardirq.h>
36 #include <linux/cpu.h>
37
38 #include <asm/ptrace.h>
39 #include <asm/atomic.h>
40 #include <asm/irq.h>
41 #include <asm/page.h>
42 #include <asm/pgtable.h>
43 #include <asm/sections.h>
44 #include <asm/io.h>
45 #include <asm/prom.h>
46 #include <asm/smp.h>
47 #include <asm/machdep.h>
48 #include <asm/pmac_feature.h>
49 #include <asm/time.h>
50 #include <asm/mpic.h>
51 #include <asm/cacheflush.h>
52 #include <asm/keylargo.h>
53 #include <asm/pmac_low_i2c.h>
54
55 #undef DEBUG
56
57 #ifdef DEBUG
58 #define DBG(fmt...) udbg_printf(fmt)
59 #else
60 #define DBG(fmt...)
61 #endif
62
63 extern void __secondary_start_pmac_0(void);
64
65 #ifdef CONFIG_PPC32
66
67 /* Sync flag for HW tb sync */
68 static volatile int sec_tb_reset = 0;
69
70 /*
71  * Powersurge (old powermac SMP) support.
72  */
73
74 /* Addresses for powersurge registers */
75 #define HAMMERHEAD_BASE         0xf8000000
76 #define HHEAD_CONFIG            0x90
77 #define HHEAD_SEC_INTR          0xc0
78
79 /* register for interrupting the primary processor on the powersurge */
80 /* N.B. this is actually the ethernet ROM! */
81 #define PSURGE_PRI_INTR         0xf3019000
82
83 /* register for storing the start address for the secondary processor */
84 /* N.B. this is the PCI config space address register for the 1st bridge */
85 #define PSURGE_START            0xf2800000
86
87 /* Daystar/XLR8 4-CPU card */
88 #define PSURGE_QUAD_REG_ADDR    0xf8800000
89
90 #define PSURGE_QUAD_IRQ_SET     0
91 #define PSURGE_QUAD_IRQ_CLR     1
92 #define PSURGE_QUAD_IRQ_PRIMARY 2
93 #define PSURGE_QUAD_CKSTOP_CTL  3
94 #define PSURGE_QUAD_PRIMARY_ARB 4
95 #define PSURGE_QUAD_BOARD_ID    6
96 #define PSURGE_QUAD_WHICH_CPU   7
97 #define PSURGE_QUAD_CKSTOP_RDBK 8
98 #define PSURGE_QUAD_RESET_CTL   11
99
100 #define PSURGE_QUAD_OUT(r, v)   (out_8(quad_base + ((r) << 4) + 4, (v)))
101 #define PSURGE_QUAD_IN(r)       (in_8(quad_base + ((r) << 4) + 4) & 0x0f)
102 #define PSURGE_QUAD_BIS(r, v)   (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) | (v)))
103 #define PSURGE_QUAD_BIC(r, v)   (PSURGE_QUAD_OUT((r), PSURGE_QUAD_IN(r) & ~(v)))
104
105 /* virtual addresses for the above */
106 static volatile u8 __iomem *hhead_base;
107 static volatile u8 __iomem *quad_base;
108 static volatile u32 __iomem *psurge_pri_intr;
109 static volatile u8 __iomem *psurge_sec_intr;
110 static volatile u32 __iomem *psurge_start;
111
112 /* values for psurge_type */
113 #define PSURGE_NONE             -1
114 #define PSURGE_DUAL             0
115 #define PSURGE_QUAD_OKEE        1
116 #define PSURGE_QUAD_COTTON      2
117 #define PSURGE_QUAD_ICEGRASS    3
118
119 /* what sort of powersurge board we have */
120 static int psurge_type = PSURGE_NONE;
121
122 /*
123  * Set and clear IPIs for powersurge.
124  */
125 static inline void psurge_set_ipi(int cpu)
126 {
127         if (psurge_type == PSURGE_NONE)
128                 return;
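        /* the primary is interrupted by a dummy read of its intr register;
         * secondaries are poked with a write */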
129         if (cpu == 0)
130                 in_be32(psurge_pri_intr);
131         else if (psurge_type == PSURGE_DUAL)
132                 out_8(psurge_sec_intr, 0);
133         else
134                 PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_SET, 1 << cpu);
135 }
136
137 static inline void psurge_clr_ipi(int cpu)
138 {
139         if (cpu > 0) {
140                 switch(psurge_type) {
141                 case PSURGE_DUAL:
142                         out_8(psurge_sec_intr, ~0);
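                        /* fall through */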
143                 case PSURGE_NONE:
144                         break;
145                 default:
146                         PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, 1 << cpu);
147                 }
148         }
149 }
150
151 /*
152  * On powersurge (old SMP powermac architecture) we don't have
153  * separate IPIs for separate messages like openpic does.  Instead
154  * we have a bitmap for each processor, where a 1 bit means that
155  * the corresponding message is pending for that processor.
156  * Ideally each cpu's entry would be in a different cache line.
157  *  -- paulus.
158  */
159 static unsigned long psurge_smp_message[NR_CPUS];
160
161 void psurge_smp_message_recv(struct pt_regs *regs)
162 {
163         int cpu = smp_processor_id();
164         int msg;
165
166         /* clear interrupt */
167         psurge_clr_ipi(cpu);
168
169         if (num_online_cpus() < 2)
170                 return;
171
172         /* make sure there is a message there */
173         for (msg = 0; msg < 4; msg++)
174                 if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
175                         smp_message_recv(msg, regs);
176 }
177
178 irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
179 {
180         psurge_smp_message_recv(regs);
181         return IRQ_HANDLED;
182 }
183
184 static void smp_psurge_message_pass(int target, int msg)
185 {
186         int i;
187
188         if (num_online_cpus() < 2)
189                 return;
190
191         for (i = 0; i < NR_CPUS; i++) {
192                 if (!cpu_online(i))
193                         continue;
194                 if (target == MSG_ALL
195                     || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
196                     || target == i) {
197                         set_bit(msg, &psurge_smp_message[i]);
198                         psurge_set_ipi(i);
199                 }
200         }
201 }
202
203 /*
204  * Determine whether a quad card is present. We read the board ID register,
205  * force the data bus to change to something else, and read it again.
206  * If it's stable, then the register probably exists (ugh!)
207  */
208 static int __init psurge_quad_probe(void)
209 {
210         int type;
211         unsigned int i;
212
213         type = PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID);
214         if (type < PSURGE_QUAD_OKEE || type > PSURGE_QUAD_ICEGRASS
215             || type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
216                 return PSURGE_DUAL;
217
218         /* looks OK, try a slightly more rigorous test */
219         /* bogus is not necessarily cacheline-aligned,
220            though I don't suppose that really matters.  -- paulus */
221         for (i = 0; i < 100; i++) {
222                 volatile u32 bogus[8];
223                 bogus[(0+i)%8] = 0x00000000;
224                 bogus[(1+i)%8] = 0x55555555;
225                 bogus[(2+i)%8] = 0xFFFFFFFF;
226                 bogus[(3+i)%8] = 0xAAAAAAAA;
227                 bogus[(4+i)%8] = 0x33333333;
228                 bogus[(5+i)%8] = 0xCCCCCCCC;
229                 bogus[(6+i)%8] = 0xCCCCCCCC;
230                 bogus[(7+i)%8] = 0x33333333;
231                 wmb();
232                 asm volatile("dcbf 0,%0" : : "r" (bogus) : "memory");
233                 mb();
234                 if (type != PSURGE_QUAD_IN(PSURGE_QUAD_BOARD_ID))
235                         return PSURGE_DUAL;
236         }
237         return type;
238 }
239
240 static void __init psurge_quad_init(void)
241 {
242         int procbits;
243
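        /* walk the extra quad-card CPUs through a reset sequence; the mdelay()s
         * give the board time to settle between steps */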
244         if (ppc_md.progress) ppc_md.progress("psurge_quad_init", 0x351);
245         procbits = ~PSURGE_QUAD_IN(PSURGE_QUAD_WHICH_CPU);
246         if (psurge_type == PSURGE_QUAD_ICEGRASS)
247                 PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
248         else
249                 PSURGE_QUAD_BIC(PSURGE_QUAD_CKSTOP_CTL, procbits);
250         mdelay(33);
251         out_8(psurge_sec_intr, ~0);
252         PSURGE_QUAD_OUT(PSURGE_QUAD_IRQ_CLR, procbits);
253         PSURGE_QUAD_BIS(PSURGE_QUAD_RESET_CTL, procbits);
254         if (psurge_type != PSURGE_QUAD_ICEGRASS)
255                 PSURGE_QUAD_BIS(PSURGE_QUAD_CKSTOP_CTL, procbits);
256         PSURGE_QUAD_BIC(PSURGE_QUAD_PRIMARY_ARB, procbits);
257         mdelay(33);
258         PSURGE_QUAD_BIC(PSURGE_QUAD_RESET_CTL, procbits);
259         mdelay(33);
260         PSURGE_QUAD_BIS(PSURGE_QUAD_PRIMARY_ARB, procbits);
261         mdelay(33);
262 }
263
264 static int __init smp_psurge_probe(void)
265 {
266         int i, ncpus;
267
268         /* We don't do SMP on the PPC601 -- paulus */
269         if (PVR_VER(mfspr(SPRN_PVR)) == 1)
270                 return 1;
271
272         /*
273          * The powersurge cpu board can be used in the generation
274          * of powermacs that have a socket for an upgradeable cpu card,
275          * including the 7500, 8500, 9500, 9600.
276          * The device tree doesn't tell you if you have 2 cpus because
277          * OF doesn't know anything about the 2nd processor.
278          * Instead we look for magic bits in magic registers,
279          * in the hammerhead memory controller in the case of the
280          * dual-cpu powersurge board.  -- paulus.
281          */
282         if (find_devices("hammerhead") == NULL)
283                 return 1;
284
285         hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
286         quad_base = ioremap(PSURGE_QUAD_REG_ADDR, 1024);
287         psurge_sec_intr = hhead_base + HHEAD_SEC_INTR;
288
289         psurge_type = psurge_quad_probe();
290         if (psurge_type != PSURGE_DUAL) {
291                 psurge_quad_init();
292                 /* All released cards using this HW design have 4 CPUs */
293                 ncpus = 4;
294         } else {
295                 iounmap(quad_base);
296                 if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
297                         /* not a dual-cpu card */
298                         iounmap(hhead_base);
299                         psurge_type = PSURGE_NONE;
300                         return 1;
301                 }
302                 ncpus = 2;
303         }
304
305         psurge_start = ioremap(PSURGE_START, 4);
306         psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
307
308         /* this is not actually strictly necessary -- paulus. */
309         for (i = 1; i < ncpus; ++i)
310                 smp_hw_index[i] = i;
311
312         if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
313
314         return ncpus;
315 }
316
317 static void __init smp_psurge_kick_cpu(int nr)
318 {
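        /* each CPU has an 8-byte entry stub in __secondary_start_pmac_0,
         * hence the nr * 8 offset */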
319         unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
320         unsigned long a;
321
322         /* may need to flush here if the secondary's BATs aren't set up */
323         for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
324                 asm volatile("dcbf 0,%0" : : "r" (a) : "memory");
325         asm volatile("sync");
326
327         if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);
328
329         out_be32(psurge_start, start);
330         mb();
331
332         psurge_set_ipi(nr);
333         udelay(10);
334         psurge_clr_ipi(nr);
335
336         if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
337 }
338
339 /*
340  * With the dual-cpu powersurge board, the decrementers and timebases
341  * of both cpus are frozen after the secondary cpu is started up,
342  * until we give the secondary cpu another interrupt.  This routine
343  * uses this to get the timebases synchronized.
344  *  -- paulus.
345  */
346 static void __init psurge_dual_sync_tb(int cpu_nr)
347 {
348         int t;
349
350         set_dec(tb_ticks_per_jiffy);
351         set_tb(0, 0);
352         last_jiffy_stamp(cpu_nr) = 0;
353
354         if (cpu_nr > 0) {
355                 mb();
356                 sec_tb_reset = 1;
357                 return;
358         }
359
360         /* wait for the secondary to have reset its TB before proceeding */
361         for (t = 10000000; t > 0 && !sec_tb_reset; --t)
362                 ;
363
364         /* now interrupt the secondary, starting both TBs */
365         psurge_set_ipi(1);
366
367         smp_tb_synchronized = 1;
368 }
369
370 static struct irqaction psurge_irqaction = {
371         .handler = psurge_primary_intr,
372         .flags = SA_INTERRUPT,
373         .mask = CPU_MASK_NONE,
374         .name = "primary IPI",
375 };
376
377 static void __init smp_psurge_setup_cpu(int cpu_nr)
378 {
379
380         if (cpu_nr == 0) {
381                 /* If we failed to start the second CPU, we should still
382                  * send it an IPI to start the timebase & DEC or we might
383                  * have them stuck.
384                  */
385                 if (num_online_cpus() < 2) {
386                         if (psurge_type == PSURGE_DUAL)
387                                 psurge_set_ipi(1);
388                         return;
389                 }
390                 /* reset the entry point so that if we get another intr we
391                  * won't try to start up again */
392                 out_be32(psurge_start, 0x100);
393                 if (setup_irq(30, &psurge_irqaction))
394                         printk(KERN_ERR "Couldn't get primary IPI interrupt");
395         }
396
397         if (psurge_type == PSURGE_DUAL)
398                 psurge_dual_sync_tb(cpu_nr);
399 }
400
401 void __init smp_psurge_take_timebase(void)
402 {
403         /* Dummy implementation */
404 }
405
406 void __init smp_psurge_give_timebase(void)
407 {
408         /* Dummy implementation */
409 }
410
411 /* PowerSurge-style Macs */
412 struct smp_ops_t psurge_smp_ops = {
413         .message_pass   = smp_psurge_message_pass,
414         .probe          = smp_psurge_probe,
415         .kick_cpu       = smp_psurge_kick_cpu,
416         .setup_cpu      = smp_psurge_setup_cpu,
417         .give_timebase  = smp_psurge_give_timebase,
418         .take_timebase  = smp_psurge_take_timebase,
419 };
420 #endif /* CONFIG_PPC32 - actually powersurge support */
421
422 #ifdef CONFIG_PPC64
423 /*
424  * G5s enable/disable the timebase via an i2c-connected clock chip.
425  */
426 static struct device_node *pmac_tb_clock_chip_host;
427 static u8 pmac_tb_pulsar_addr;
428 static void (*pmac_tb_freeze)(int freeze);
429 static DEFINE_SPINLOCK(timebase_lock);
430 static unsigned long timebase;
431
432 static void smp_core99_cypress_tb_freeze(int freeze)
433 {
434         u8 data;
435         int rc;
436
437         /* Strangely, the device-tree says the address is 0xd2, but Darwin
438          * accesses 0xd0 ...
439          */
440         pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
441         rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
442                                0xd0 | pmac_low_i2c_read,
443                                0x81, &data, 1);
444         if (rc != 0)
445                 goto bail;
446
447         data = (data & 0xf3) | (freeze ? 0x00 : 0x0c);
448
449         pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
450         rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
451                                0xd0 | pmac_low_i2c_write,
452                                0x81, &data, 1);
453
454  bail:
455         if (rc != 0) {
456                 printk("Cypress Timebase %s rc: %d\n",
457                        freeze ? "freeze" : "unfreeze", rc);
458                 panic("Timebase freeze failed !\n");
459         }
460 }
461
462
463 static void smp_core99_pulsar_tb_freeze(int freeze)
464 {
465         u8 data;
466         int rc;
467
468         pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined);
469         rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
470                                pmac_tb_pulsar_addr | pmac_low_i2c_read,
471                                0x2e, &data, 1);
472         if (rc != 0)
473                 goto bail;
474
475         data = (data & 0x88) | (freeze ? 0x11 : 0x22);
476
477         pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub);
478         rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host,
479                                pmac_tb_pulsar_addr | pmac_low_i2c_write,
480                                0x2e, &data, 1);
481  bail:
482         if (rc != 0) {
483                 printk(KERN_ERR "Pulsar Timebase %s rc: %d\n",
484                        freeze ? "freeze" : "unfreeze", rc);
485                 panic("Timebase freeze failed !\n");
486         }
487 }
488
489
490 static void smp_core99_give_timebase(void)
491 {
492         /* Open i2c bus for synchronous access */
493         if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
494                 panic("Can't open i2c for TB sync !\n");
495
496         spin_lock(&timebase_lock);
497         (*pmac_tb_freeze)(1);
498         mb();
499         timebase = get_tb();
500         spin_unlock(&timebase_lock);
501
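        /* the secondary clears 'timebase' once it has copied it
         * (see smp_core99_take_timebase below) */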
502         while (timebase)
503                 barrier();
504
505         spin_lock(&timebase_lock);
506         (*pmac_tb_freeze)(0);
507         spin_unlock(&timebase_lock);
508
509         /* Close i2c bus */
510         pmac_low_i2c_close(pmac_tb_clock_chip_host);
511 }
512
513
514 static void __devinit smp_core99_take_timebase(void)
515 {
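        /* spin until the primary has published its timebase value */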
516         while (!timebase)
517                 barrier();
518         spin_lock(&timebase_lock);
519         set_tb(timebase >> 32, timebase & 0xffffffff);
520         timebase = 0;
521         spin_unlock(&timebase_lock);
522 }
523
524 static void __init smp_core99_setup(int ncpus)
525 {
526         struct device_node *cc = NULL;  
527         struct device_node *p;
528         u32 *reg;
529         int ok;
530
531         /* HW sync only on these platforms */
532         if (!machine_is_compatible("PowerMac7,2") &&
533             !machine_is_compatible("PowerMac7,3") &&
534             !machine_is_compatible("RackMac3,1"))
535                 return;
536
537         /* Look for the clock chip */
538         while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) {
539                 p = of_get_parent(cc);
540                 ok = p && device_is_compatible(p, "uni-n-i2c");
541                 of_node_put(p);
542                 if (!ok)
543                         continue;
544
545                 reg = (u32 *)get_property(cc, "reg", NULL);
546                 if (reg == NULL)
547                         continue;
548
549                 switch (*reg) {
550                 case 0xd2:
551                         if (device_is_compatible(cc, "pulsar-legacy-slewing")) {
552                                 pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
553                                 pmac_tb_pulsar_addr = 0xd2;
554                                 printk(KERN_INFO "Timebase clock is Pulsar chip\n");
555                         } else if (device_is_compatible(cc, "cy28508")) {
556                                 pmac_tb_freeze = smp_core99_cypress_tb_freeze;
557                                 printk(KERN_INFO "Timebase clock is Cypress chip\n");
558                         }
559                         break;
560                 case 0xd4:
561                         pmac_tb_freeze = smp_core99_pulsar_tb_freeze;
562                         pmac_tb_pulsar_addr = 0xd4;
563                         printk(KERN_INFO "Timebase clock is Pulsar chip\n");
564                         break;
565                 }
566                 if (pmac_tb_freeze != NULL) {
567                         pmac_tb_clock_chip_host = of_get_parent(cc);
568                         of_node_put(cc);
569                         break;
570                 }
571         }
572         if (pmac_tb_freeze == NULL) {
573                 smp_ops->give_timebase = smp_generic_give_timebase;
574                 smp_ops->take_timebase = smp_generic_take_timebase;
575         }
576 }
577
578 /* nothing to do here, caches are already set up by the service processor */
579 static inline void __devinit core99_init_caches(int cpu)
580 {
581 }
582
583 #else /* CONFIG_PPC64 */
584
585 /*
586  * SMP G4 powermacs use a GPIO to enable/disable the timebase.
587  */
588
589 static unsigned int core99_tb_gpio;     /* Timebase freeze GPIO */
590
591 static unsigned int pri_tb_hi, pri_tb_lo;
592 static unsigned int pri_tb_stamp;
593
594 /* not __init, called in sleep/wakeup code */
595 void smp_core99_give_timebase(void)
596 {
597         unsigned long flags;
598         unsigned int t;
599
600         /* wait for the secondary to be in take_timebase */
601         for (t = 100000; t > 0 && !sec_tb_reset; --t)
602                 udelay(10);
603         if (!sec_tb_reset) {
604                 printk(KERN_WARNING "Timeout waiting sync on second CPU\n");
605                 return;
606         }
607
608         /* freeze the timebase and read it */
609         /* disable interrupts so the timebase is disabled for the
610            shortest possible time */
611         local_irq_save(flags);
612         pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4);
613         pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
614         mb();
615         pri_tb_hi = get_tbu();
616         pri_tb_lo = get_tbl();
617         pri_tb_stamp = last_jiffy_stamp(smp_processor_id());
618         mb();
619
620         /* tell the secondary we're ready */
621         sec_tb_reset = 2;
622         mb();
623
624         /* wait for the secondary to have taken it */
625         for (t = 100000; t > 0 && sec_tb_reset; --t)
626                 udelay(10);
627         if (sec_tb_reset)
628                 printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
629         else
630                 smp_tb_synchronized = 1;
631
632         /* Now restart the timebase by releasing the GPIO back to open collector */
633         pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
634         pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0);
635         local_irq_restore(flags);
636 }
637
638 /* not __init, called in sleep/wakeup code */
639 void smp_core99_take_timebase(void)
640 {
641         unsigned long flags;
642
643         /* tell the primary we're here */
644         sec_tb_reset = 1;
645         mb();
646
647         /* wait for the primary to set pri_tb_hi/lo */
648         while (sec_tb_reset < 2)
649                 mb();
650
651         /* set our decrementer and load the primary's timebase and jiffy stamp */
652         local_irq_save(flags);
653         set_dec(1);
654         set_tb(pri_tb_hi, pri_tb_lo);
655         last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp;
656         mb();
657
658         /* tell the primary we're done */
659         sec_tb_reset = 0;
660         mb();
661         local_irq_restore(flags);
662 }
663
664 /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
665 volatile static long int core99_l2_cache;
666 volatile static long int core99_l3_cache;
667
668 static void __devinit core99_init_caches(int cpu)
669 {
670         if (!cpu_has_feature(CPU_FTR_L2CR))
671                 return;
672
673         if (cpu == 0) {
674                 core99_l2_cache = _get_L2CR();
675                 printk("CPU0: L2CR is %lx\n", core99_l2_cache);
676         } else {
677                 printk("CPU%d: L2CR was %lx\n", cpu, _get_L2CR());
678                 _set_L2CR(0);
679                 _set_L2CR(core99_l2_cache);
680                 printk("CPU%d: L2CR set to %lx\n", cpu, core99_l2_cache);
681         }
682
683         if (!cpu_has_feature(CPU_FTR_L3CR))
684                 return;
685
686         if (cpu == 0){
687                 core99_l3_cache = _get_L3CR();
688                 printk("CPU0: L3CR is %lx\n", core99_l3_cache);
689         } else {
690                 printk("CPU%d: L3CR was %lx\n", cpu, _get_L3CR());
691                 _set_L3CR(0);
692                 _set_L3CR(core99_l3_cache);
693                 printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache);
694         }
695 }
696
697 static void __init smp_core99_setup(int ncpus)
698 {
699         struct device_node *cpu;
700         u32 *tbprop = NULL;
701         int i;
702
703         core99_tb_gpio = KL_GPIO_TB_ENABLE;     /* default value */
704         cpu = of_find_node_by_type(NULL, "cpu");
705         if (cpu != NULL) {
706                 tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL);
707                 if (tbprop)
708                         core99_tb_gpio = *tbprop;
709                 of_node_put(cpu);
710         }
711
712         /* XXX should get this from reg properties */
713         for (i = 1; i < ncpus; ++i)
714                 smp_hw_index[i] = i;
715         powersave_nap = 0;
716 }
717 #endif
718
719 static int __init smp_core99_probe(void)
720 {
721         struct device_node *cpus;
722         int ncpus = 0;
723
724         if (ppc_md.progress) ppc_md.progress("smp_core99_probe", 0x345);
725
726         /* Count CPUs in the device-tree */
727         for (cpus = NULL; (cpus = of_find_node_by_type(cpus, "cpu")) != NULL;)
728                 ++ncpus;
729
730         printk(KERN_INFO "PowerMac SMP probe found %d cpus\n", ncpus);
731
732         /* Nothing more to do if less than 2 of them */
733         if (ncpus <= 1)
734                 return 1;
735
736         smp_core99_setup(ncpus);
737         mpic_request_ipis();
738         core99_init_caches(0);
739
740         return ncpus;
741 }
742
743 static void __devinit smp_core99_kick_cpu(int nr)
744 {
745         unsigned int save_vector;
746         unsigned long new_vector;
747         unsigned long flags;
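        /* 0x100 is the PowerPC system reset exception vector */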
748         volatile unsigned int *vector
749                  = ((volatile unsigned int *)(KERNELBASE+0x100));
750
751         if (nr < 0 || nr > 3)
752                 return;
753         if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346);
754
755         local_irq_save(flags);
756         local_irq_disable();
757
758         /* Save reset vector */
759         save_vector = *vector;
760
761         /* Set up a fake reset vector that does
762          *   b __secondary_start_pmac_0 + nr*8 - KERNELBASE
763          */
764         new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8;
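        /* 0x48000002 is a 'ba' (branch absolute) opcode; the physical address
         * of the secondary entry point goes in the LI field */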
765         *vector = 0x48000002 + new_vector - KERNELBASE;
766
767         /* flush data cache and inval instruction cache */
768         flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
769
770         /* Put some life in our friend */
771         pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
772
773         /* FIXME: We wait a bit for the CPU to take the exception, I should
774          * instead wait for the entry code to set something for me. Well,
775          * ideally, all that crap will be done in prom.c and the CPU left
776          * in a RAM-based wait loop like CHRP.
777          */
778         mdelay(1);
779
780         /* Restore our exception vector */
781         *vector = save_vector;
782         flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
783
784         local_irq_restore(flags);
785         if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
786 }
787
788 static void __devinit smp_core99_setup_cpu(int cpu_nr)
789 {
790         /* Setup L2/L3 */
791         if (cpu_nr != 0)
792                 core99_init_caches(cpu_nr);
793
794         /* Setup openpic */
795         mpic_setup_this_cpu();
796
797         if (cpu_nr == 0) {
798 #ifdef CONFIG_POWER4
799                 extern void g5_phy_disable_cpu1(void);
800
801                 /* If we didn't start the second CPU, we must take
802                  * it off the bus
803                  */
804                 if (machine_is_compatible("MacRISC4") &&
805                     num_online_cpus() < 2)              
806                         g5_phy_disable_cpu1();
807 #endif /* CONFIG_POWER4 */
808                 if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349);
809         }
810 }
811
812
813 /* Core99 Macs (dual G4s and G5s) */
814 struct smp_ops_t core99_smp_ops = {
815         .message_pass   = smp_mpic_message_pass,
816         .probe          = smp_core99_probe,
817         .kick_cpu       = smp_core99_kick_cpu,
818         .setup_cpu      = smp_core99_setup_cpu,
819         .give_timebase  = smp_core99_give_timebase,
820         .take_timebase  = smp_core99_take_timebase,
821 };
822
823 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
824
825 int __cpu_disable(void)
826 {
827         cpu_clear(smp_processor_id(), cpu_online_map);
828
829         /* XXX reset cpu affinity here */
830         mpic_cpu_set_priority(0xf);
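        /* push the decrementer out as far as possible so it doesn't fire
         * while this CPU is being taken offline */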
831         asm volatile("mtdec %0" : : "r" (0x7fffffff));
832         mb();
833         udelay(20);
834         asm volatile("mtdec %0" : : "r" (0x7fffffff));
835         return 0;
836 }
837
838 extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
839 static int cpu_dead[NR_CPUS];
840
841 void cpu_die(void)
842 {
843         local_irq_disable();
844         cpu_dead[smp_processor_id()] = 1;
845         mb();
846         low_cpu_die();
847 }
848
849 void __cpu_die(unsigned int cpu)
850 {
851         int timeout;
852
853         timeout = 1000;
854         while (!cpu_dead[cpu]) {
855                 if (--timeout == 0) {
856                         printk("CPU %u refused to die!\n", cpu);
857                         break;
858                 }
859                 msleep(1);
860         }
861         cpu_callin_map[cpu] = 0;
862         cpu_dead[cpu] = 0;
863 }
864
865 #endif