/* sun4m_smp.c: Sparc SUN4M SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>

#define IRQ_RESCHEDULE          13
#define IRQ_STOP_CPU            14
#define IRQ_CROSS_CALL          15

extern ctxd_t *srmmu_ctx_table_phys;

extern void calibrate_delay(void);

extern volatile int smp_processors_ready;
extern int smp_num_cpus;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;
extern int smp_activated;
extern volatile int __cpu_number_map[NR_CPUS];
extern volatile int __cpu_logical_map[NR_CPUS];
extern volatile unsigned long ipi_count;
extern volatile int smp_process_available;
extern volatile int smp_commenced;
extern int __smp4m_processor_id(void);

/*#define SMP_DEBUG*/

#ifdef SMP_DEBUG
#define SMP_PRINTK(x)   printk x
#else
#define SMP_PRINTK(x)
#endif

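/* Atomically exchange *ptr and val using the SPARC V8 swap
 * instruction.  This is the primitive used below to publish this
 * CPU's slot in cpu_callin_map to the boot processor.
 */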
static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
        __asm__ __volatile__("swap [%1], %0\n\t" :
                             "=&r" (val), "=&r" (ptr) :
                             "0" (val), "1" (ptr));
        return val;
}

static void smp_setup_percpu_timer(void);
extern void cpu_probe(void);

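/* Entry point for a secondary CPU, reached from the startup
 * trampoline.  Interrupts stay off until the boot CPU releases us
 * by setting smp_commenced.
 */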
void __init smp4m_callin(void)
{
        int cpuid = hard_smp_processor_id();

        local_flush_cache_all();
        local_flush_tlb_all();

        /* Keep undirected device interrupts pointed at the boot CPU. */
        set_irq_udt(boot_cpu_id);

        /* Get our local ticker going. */
        smp_setup_percpu_timer();

        calibrate_delay();
        smp_store_cpu_info(cpuid);

        local_flush_cache_all();
        local_flush_tlb_all();

        /*
         * Unblock the master CPU only once the scheduler state of all
         * secondary CPUs is up to date; the master may call into the
         * scheduler code only after SMP initialization is complete.
         */
        /* Allow master to continue. */
        swap((unsigned long *)&cpu_callin_map[cpuid], 1);

        local_flush_cache_all();
        local_flush_tlb_all();

        cpu_probe();

        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t"
                             : : "r" (&current_set[cpuid])
                             : "memory" /* paranoid */);

        /* Attach to the address space of init_task. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        while (!smp_commenced)
                barrier();

        local_flush_cache_all();
        local_flush_tlb_all();

        local_irq_enable();
}

extern void init_IRQ(void);
extern void cpu_panic(void);

/*
 *      Cycle through the processors asking the PROM to start each one.
 */

extern struct linux_prom_registers smp_penguin_ctable;
extern unsigned long trapbase_cpu1[];
extern unsigned long trapbase_cpu2[];
extern unsigned long trapbase_cpu3[];

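/* Boot-CPU side of bringup: collect the MIDs the PROM reports, fork
 * an idle task for each secondary, start each one through the PROM at
 * its trampoline entry, then wait for it to check in via
 * cpu_callin_map.
 */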
void __init smp4m_boot_cpus(void)
{
        int cpucount = 0;
        int i, mid;

        printk("Entering SMP Mode...\n");

        local_irq_enable();
        cpus_clear(cpu_present_map);

        for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
                cpu_set(mid, cpu_present_map);

        for (i = 0; i < NR_CPUS; i++) {
                __cpu_number_map[i] = -1;
                __cpu_logical_map[i] = -1;
        }

        __cpu_number_map[boot_cpu_id] = 0;
        __cpu_logical_map[0] = boot_cpu_id;
        current_thread_info()->cpu = boot_cpu_id;

        smp_store_cpu_info(boot_cpu_id);
        set_irq_udt(boot_cpu_id);
        smp_setup_percpu_timer();
        local_flush_cache_all();
        if (cpu_find_by_instance(1, NULL, NULL))
                return;  /* Not an MP box. */
        for (i = 0; i < NR_CPUS; i++) {
                if (i == boot_cpu_id)
                        continue;

                if (cpu_isset(i, cpu_present_map)) {
                        extern unsigned long sun4m_cpu_startup;
                        unsigned long *entry = &sun4m_cpu_startup;
                        struct task_struct *p;
                        int timeout;

                        /* Cook up an idler for this guy. */
                        p = fork_idle(i);
                        cpucount++;
                        current_set[i] = task_thread_info(p);
                        /* See trampoline.S; each CPU's startup entry
                         * is three instructions long.
                         */
                        entry += ((i-1) * 3);

                        /*
                         * Initialize the context table.  The call to
                         * prom_startcpu() trashes the structure, so we
                         * must re-initialize it for each CPU.
                         */
                        smp_penguin_ctable.which_io = 0;
                        smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
                        smp_penguin_ctable.reg_size = 0;

                        /* whirrr, whirrr, whirrrrrrrrr... */
                        printk("Starting CPU %d at %p\n", i, entry);
                        local_flush_cache_all();
                        prom_startcpu(cpu_data(i).prom_node,
                                      &smp_penguin_ctable, 0, (char *)entry);

                        /* wheee... it's going... */
                        for (timeout = 0; timeout < 10000; timeout++) {
                                if (cpu_callin_map[i])
                                        break;
                                udelay(200);
                        }
                        if (cpu_callin_map[i]) {
                                /* Another "Red Snapper". */
                                __cpu_number_map[i] = i;
                                __cpu_logical_map[i] = i;
                        } else {
                                cpucount--;
                                printk("Processor %d is stuck.\n", i);
                        }
                }
                if (!(cpu_callin_map[i])) {
                        cpu_clear(i, cpu_present_map);
                        __cpu_number_map[i] = -1;
                }
        }
        local_flush_cache_all();
        if (cpucount == 0) {
                printk("Error: only one Processor found.\n");
                cpu_present_map = cpumask_of_cpu(smp_processor_id());
        } else {
                unsigned long bogosum = 0;
                for (i = 0; i < NR_CPUS; i++) {
                        if (cpu_isset(i, cpu_present_map))
                                bogosum += cpu_data(i).udelay_val;
                }
                /* udelay_val is in loops per jiffy, hence the
                 * 500000/HZ scaling to whole BogoMIPS.
                 */
                printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
                       cpucount + 1,
                       bogosum/(500000/HZ),
                       (bogosum/(5000/HZ))%100);
                smp_activated = 1;
                smp_num_cpus = cpucount + 1;
        }

        /* Free unneeded trap tables */
        if (!cpu_isset(1, cpu_present_map)) {
                ClearPageReserved(virt_to_page(trapbase_cpu1));
                set_page_count(virt_to_page(trapbase_cpu1), 1);
                free_page((unsigned long)trapbase_cpu1);
                totalram_pages++;
                num_physpages++;
        }
        if (!cpu_isset(2, cpu_present_map)) {
                ClearPageReserved(virt_to_page(trapbase_cpu2));
                set_page_count(virt_to_page(trapbase_cpu2), 1);
                free_page((unsigned long)trapbase_cpu2);
                totalram_pages++;
                num_physpages++;
        }
        if (!cpu_isset(3, cpu_present_map)) {
                ClearPageReserved(virt_to_page(trapbase_cpu3));
                set_page_count(virt_to_page(trapbase_cpu3), 1);
                free_page((unsigned long)trapbase_cpu3);
                totalram_pages++;
                num_physpages++;
        }

        /* Ok, they are spinning and ready to go. */
        smp_processors_ready = 1;
}

/* At each hardware IRQ, we get this called to forward IRQ reception
 * to the next processor.  The caller must disable the IRQ level being
 * serviced globally so that there are no double interrupts received.
 *
 * XXX See sparc64 irq.c.
 */
void smp4m_irq_rotate(int cpu)
{
}

/* Cross calls do all of their message-passing work themselves, so
 * that they can be efficient and atomic; only stop-CPU and reschedule
 * messages come through here.
 */
void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
{
        static unsigned long smp_cpu_in_msg[NR_CPUS];
        cpumask_t mask;
        int me = smp_processor_id();
        int irq, i;

        if (msg == MSG_RESCHEDULE) {
                irq = IRQ_RESCHEDULE;

                if (smp_cpu_in_msg[me])
                        return;
        } else if (msg == MSG_STOP_CPU) {
                irq = IRQ_STOP_CPU;
        } else {
                goto barf;
        }

        smp_cpu_in_msg[me]++;
        if (target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
                mask = cpu_present_map;
                if (target == MSG_ALL_BUT_SELF)
                        cpu_clear(me, mask);
                for (i = 0; i < 4; i++) {
                        if (cpu_isset(i, mask))
                                set_cpu_int(i, irq);
                }
        } else {
                set_cpu_int(target, irq);
        }
        smp_cpu_in_msg[me]--;

        return;
barf:
        printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
        panic("Bogon SMP message pass.");
}

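/* The single, statically allocated cross-call descriptor; it is
 * only ever written under cross_call_lock below.
 */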
static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
        unsigned long arg4;
        unsigned long arg5;
        unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
        unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info;

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                    unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
        if (smp_processors_ready) {
                register int ncpus = smp_num_cpus;
                unsigned long flags;

                spin_lock_irqsave(&cross_call_lock, flags);

                /* Init function glue. */
                ccall_info.func = func;
                ccall_info.arg1 = arg1;
                ccall_info.arg2 = arg2;
                ccall_info.arg3 = arg3;
                ccall_info.arg4 = arg4;
                ccall_info.arg5 = arg5;

                /* Init receive/complete mapping, plus fire the IPI's off. */
                {
                        cpumask_t mask = cpu_present_map;
                        register int i;

                        cpu_clear(smp_processor_id(), mask);
                        for (i = 0; i < ncpus; i++) {
                                if (cpu_isset(i, mask)) {
                                        ccall_info.processors_in[i] = 0;
                                        ccall_info.processors_out[i] = 0;
                                        set_cpu_int(i, IRQ_CROSS_CALL);
                                } else {
                                        ccall_info.processors_in[i] = 1;
                                        ccall_info.processors_out[i] = 1;
                                }
                        }
                }

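                /* Two-phase handshake: first wait for every target CPU
                 * to enter the IPI handler, then wait for every one to
                 * finish running the function.
                 */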
                {
                        register int i;

                        i = 0;
                        do {
                                while (!ccall_info.processors_in[i])
                                        barrier();
                        } while (++i < ncpus);

                        i = 0;
                        do {
                                while (!ccall_info.processors_out[i])
                                        barrier();
                        } while (++i < ncpus);
                }

                spin_unlock_irqrestore(&cross_call_lock, flags);
        }
}

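/* Executed on each target CPU from the IRQ_CROSS_CALL interrupt;
 * mirrors the in/out handshake that smp4m_cross_call() waits on.
 */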
/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
        int i = smp_processor_id();

        ccall_info.processors_in[i] = 1;
        ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
                        ccall_info.arg4, ccall_info.arg5);
        ccall_info.processors_out[i] = 1;
}

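/* Per-CPU level-14 timer tick: drives profiling on every tick and,
 * each time the prof_counter divider expires, the regular per-CPU
 * timer bookkeeping.
 */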
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        clear_profile_irq(cpu);

        profile_tick(CPU_PROFILING, regs);

        if (!--prof_counter(cpu)) {
                int user = user_mode(regs);

                irq_enter();
                update_process_times(user);
                irq_exit();

                prof_counter(cpu) = prof_multiplier(cpu);
        }
}

extern unsigned int lvl14_resolution;

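/* Arm this CPU's level-14 profile timer.  prof_counter and
 * prof_multiplier start at 1, so initially every tick takes the
 * full update_process_times() path.
 */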
static void __init smp_setup_percpu_timer(void)
{
        int cpu = smp_processor_id();

        prof_counter(cpu) = prof_multiplier(cpu) = 1;
        load_profile_irq(cpu, lvl14_resolution);

        if (cpu == boot_cpu_id)
                enable_pil_irq(14);
}

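/* BTFIXUP "blackbox": at boot this routine patches a stub sequence
 * in place.  The first word of the stub encodes the destination
 * register; the patched code computes the CPU id from bits 13:12 of
 * %tbr, since each CPU runs with its own trap table.
 */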
void __init smp4m_blackbox_id(unsigned *addr)
{
        int rd = *addr & 0x3e000000;
        int rs1 = rd >> 11;

        addr[0] = 0x81580000 | rd;              /* rd %tbr, reg */
        addr[1] = 0x8130200c | rd | rs1;        /* srl reg, 0xc, reg */
        addr[2] = 0x80082003 | rd | rs1;        /* and reg, 3, reg */
}

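/* Same patching trick for load_current; only every other word is
 * rewritten here, leaving the intervening instructions of the stub
 * intact.  The result is cpu_id * 4, ready to index a pointer table.
 */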
void __init smp4m_blackbox_current(unsigned *addr)
{
        int rd = *addr & 0x3e000000;
        int rs1 = rd >> 11;

        addr[0] = 0x81580000 | rd;              /* rd %tbr, reg */
        addr[2] = 0x8130200a | rd | rs1;        /* srl reg, 0xa, reg */
        addr[4] = 0x8008200c | rd | rs1;        /* and reg, 0xc, reg */
}

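/* Wire up the sun4m implementations of the generic sparc32 SMP
 * operations via the boot-time fixup (BTFIXUP) tables.
 */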
void __init sun4m_init_smp(void)
{
        BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
        BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
        BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
}