/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 2004 Keith M Wesolowski (wesolows@foobazco.org)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/cpudata.h>

#include "irq.h"

int smp_num_cpus = 1;
volatile unsigned long cpu_callin_map[NR_CPUS] __initdata = {0,};
unsigned char boot_cpu_id = 0;
unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
int smp_activated = 0;
volatile int __cpu_number_map[NR_CPUS];
volatile int __cpu_logical_map[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which atomically
 * places the current byte at the effective address into dest_reg and
 * stores 0xff there afterwards.  A pretty lame locking primitive
 * compared to the Alpha or Intel ones, no?  Most Sparcs have a 'swap'
 * instruction, which is much better...
 */
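
/* Illustrative only: a minimal sketch (compiled out) of how a busy-wait
 * lock can be built on ldstub.  The helper name is hypothetical and is
 * not used anywhere in this file; it assumes a one-byte lock that is 0
 * when free.  ldstub returns the previous byte, so a nonzero result
 * means another processor already owns the lock.
 */
#if 0
static inline void example_ldstub_lock(volatile unsigned char *lock)
{
        unsigned char prev;

        do {
                __asm__ __volatile__("ldstub [%1], %0"
                                     : "=r" (prev)
                                     : "r" (lock)
                                     : "memory");
        } while (prev); /* spin until we observed the byte as 0 */
}
#endif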

/* Used to make bitops atomic */
unsigned char bitops_spinlock = 0;

void __cpuinit smp_store_cpu_info(int id)
{
        int cpu_node;

        cpu_data(id).udelay_val = loops_per_jiffy;

        cpu_find_by_mid(id, &cpu_node);
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);
        cpu_data(id).prom_node = cpu_node;
        cpu_data(id).mid = cpu_get_hwmid(cpu_node);

        if (cpu_data(id).mid < 0)
                panic("No MID found for CPU%d at node 0x%08x", id, cpu_node);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        extern void smp4m_smp_done(void);
        extern void smp4d_smp_done(void);
        unsigned long bogosum = 0;
        int cpu, num;

        for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online(cpu)) {
                        num++;
                        bogosum += cpu_data(cpu).udelay_val;
                }

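        /* udelay_val is loops-per-jiffy, so loops-per-second is
         * udelay_val * HZ, and one BogoMIPS is 500000 loops/sec;
         * dividing by (500000/HZ) therefore yields BogoMIPS directly.
         */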
        printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                num, bogosum/(500000/HZ),
                (bogosum/(5000/HZ))%100);

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_smp_done();
                break;
        case sun4d:
                smp4d_smp_done();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}

struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };

void smp_send_reschedule(int cpu)
{
        /* See sparc64 */
}

void smp_send_stop(void)
{
}

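/* The cross-call helpers xc0()..xc3() run the given function, with that
 * many arguments, on the other cpus.  The flush routines below all follow
 * the same pattern: clear the current cpu from the mm's cpu mask, issue
 * the cross-call only if some other cpu may hold state for the mm, then
 * perform the flush locally.
 */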
void smp_flush_cache_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all));
        local_flush_cache_all();
}

void smp_flush_tlb_all(void)
{
        xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all));
        local_flush_tlb_all();
}

void smp_flush_cache_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask)) {
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm);
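                        /* If we are the sole user and this is our active
                         * mm, no other cpu can pick it up again, so shrink
                         * the mask to just ourselves and spare future
                         * cross-calls.
                         */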
                        if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
                                mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
                }
                local_flush_tlb_mm(mm);
        }
}

void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end);
                local_flush_cache_range(vma, start, end);
        }
}

void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end);
                local_flush_tlb_range(vma, start, end);
        }
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page);
                local_flush_cache_page(vma, page);
        }
}

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;

        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;
                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page);
                local_flush_tlb_page(vma, page);
        }
}

void smp_reschedule_irq(void)
{
        set_need_resched();
}

void smp_flush_page_to_ram(unsigned long page)
{
        /* Current theory is that those who call this are the ones
         * who have just dirtied their cache with the page's contents
         * in kernel space, therefore we only run this on the local cpu.
         *
         * XXX This experiment failed, research further... -DaveM
         */
#if 1
        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page);
#endif
        local_flush_page_to_ram(page);
}

void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        cpumask_t cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);
        if (!cpus_empty(cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
}

extern unsigned int lvl14_resolution;

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
        int i;
        unsigned long flags;

        /* Prevent level14 ticker IRQ flooding. */
        if ((!multiplier) || (lvl14_resolution / multiplier) < 500)
                return -EINVAL;

        spin_lock_irqsave(&prof_setup_lock, flags);
        for_each_possible_cpu(i) {
                load_profile_irq(i, lvl14_resolution / multiplier);
                prof_multiplier(i) = multiplier;
        }
        spin_unlock_irqrestore(&prof_setup_lock, flags);

        return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        extern void __init smp4m_boot_cpus(void);
        extern void __init smp4d_boot_cpus(void);
        int i, cpuid, extra;

        printk("Entering SMP Mode...\n");

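        /* Count the cpus the PROM reports; any whose id is >= NR_CPUS
         * cannot be brought up, so tally those separately as "extra".
         */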
        extra = 0;
        for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
                if (cpuid >= NR_CPUS)
                        extra++;
        }
        /* i = number of cpus */
        if (extra && max_cpus > i - extra)
                printk("Warning: NR_CPUS is too low to start all cpus\n");

        smp_store_cpu_info(boot_cpu_id);

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                smp4m_boot_cpus();
                break;
        case sun4d:
                smp4d_boot_cpus();
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
        int instance, mid;

        instance = 0;
        while (!cpu_find_by_instance(instance, NULL, &mid)) {
                if (mid < NR_CPUS) {
                        cpu_set(mid, phys_cpu_present_map);
                        cpu_set(mid, cpu_present_map);
                }
                instance++;
        }
}

void __init smp_prepare_boot_cpu(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpuid >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
                prom_halt();
        }
        if (cpuid != 0)
                printk("boot cpu id != 0, this could work but is untested\n");

        current_thread_info()->cpu = cpuid;
        cpu_set(cpuid, cpu_online_map);
        cpu_set(cpuid, phys_cpu_present_map);
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        extern int __cpuinit smp4m_boot_one_cpu(int);
        extern int __cpuinit smp4d_boot_one_cpu(int);
        int ret = 0;

        switch (sparc_cpu_model) {
        case sun4:
                printk("SUN4\n");
                BUG();
                break;
        case sun4c:
                printk("SUN4C\n");
                BUG();
                break;
        case sun4m:
                ret = smp4m_boot_one_cpu(cpu);
                break;
        case sun4d:
                ret = smp4d_boot_one_cpu(cpu);
                break;
        case sun4e:
                printk("SUN4E\n");
                BUG();
                break;
        case sun4u:
                printk("SUN4U\n");
                BUG();
                break;
        default:
                printk("UNKNOWN!\n");
                BUG();
                break;
        }

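        /* Let the freshly booted cpu proceed past its callin, then spin
         * until it marks itself online.
         */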
        if (!ret) {
                cpu_set(cpu, smp_commenced_mask);
                while (!cpu_online(cpu))
                        mb();
        }
        return ret;
}

void smp_bogo(struct seq_file *m)
{
        int i;

        for_each_online_cpu(i) {
                seq_printf(m,
                           "Cpu%dBogo\t: %lu.%02lu\n",
                           i,
                           cpu_data(i).udelay_val/(500000/HZ),
                           (cpu_data(i).udelay_val/(5000/HZ))%100);
        }
}

void smp_info(struct seq_file *m)
{
        int i;

        seq_printf(m, "State:\n");
        for_each_online_cpu(i)
                seq_printf(m, "CPU%d\t\t: online\n", i);
}