/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *	2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *	      Renamed modified smp_call_function to smp_call_function_on_cpu()
 *	      Created a function that conforms to the old calling convention
 *	      of smp_call_function().
 *
 *	      This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
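/* Each cpu's word gets its own cache line: cpus set bits in each
   other's entries, and separate lines avoid false sharing between
   senders targeting different cpus.  */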
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

/* Set nonzero when a secondary comes online.  */
static int smp_secondary_alive __devinitdata = 0;
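/*
 * Handshake states, per the code below: the boot cpu sets this to -1
 * before starting a secondary; the secondary spins in
 * wait_boot_cpu_to_stop() until the boot cpu drops it back to 0, then
 * runs calibrate_delay() and stores 1 to announce itself alive.
 */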

/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both the boot cpu and the secondaries to move global
 * data into per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __cpuinit
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin)
		alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot cpu's go-ahead, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __cpuinit
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

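	/* Message format as written below: the length goes in the low
	   32 bits of the first quadword of ipc_buffer, and the text
	   itself starts at the second quadword.  */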
	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

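		/* The console's reply format, as this code assumes it:
		   the byte count sits in the upper 32 bits of the first
		   quadword, with the text read from further into the
		   shared buffer.  */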
		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __cpuinit
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;
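	/* On the START command below, SRM transfers control to
	   CPU_restart; __smp_callin is (assumed here to be) the assembly
	   stub that swpctx's to the idle task's PCB and then enters
	   smp_callin() above.  */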

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
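			/* Per the HWRPB spec (as assumed here), 0x1cc
			   covers the per-cpu flag bits marking a processor
			   as present, available, and with valid/loaded
			   PALcode; only a cpu with all of them set is
			   usable.  */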
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

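	/* BogoMIPS is roughly loops_per_jiffy * HZ / 500000, since each
	   delay-loop iteration is two instructions; the arithmetic below
	   also yields two decimal places.  */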
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}

\f
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

\f
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

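	/* The first mb() orders the caller's data stores ahead of the
	   IPI bits becoming visible; the second orders the bits ahead
	   of the interrupt actually being raised via wripir().  */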
	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

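		/* Peel off the lowest pending bit: ops & -ops isolates
		   it, and __ffs() turns that mask into its bit index,
		   i.e. the ipi_message_type.  */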
		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;

		case IPI_CPU_STOP:
			halt();
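			/* halt() does not return, so no break is needed.  */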

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpuid)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icaches
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
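			/* No other users of this mm: instead of an IPI,
			   zap its context on every other cpu so that any
			   cpu picking it up later must allocate a fresh
			   ASN, which implies a TLB flush.  */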
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}