/*
 *      linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)      printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};
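
/* Each message type above is one bit in ipi_data[cpu].bits: the sender
   sets the bit and interrupts the target via wripir(), and handle_ipi()
   consumes the pending word atomically with xchg().  */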

/* Set to 1 by a secondary once it has come online.  */
static int smp_secondary_alive __devinitdata = 0;
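/* The boot handshake: smp_boot_one_cpu() sets this to -1 before starting
   a secondary so that smp_callin() spins in wait_boot_cpu_to_stop(); the
   boot CPU then writes 0 to let the secondary run calibrate_delay(), and
   the secondary writes 1 once it is fully up.  */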

/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;             /* Internal processor count */
int smp_num_cpus = 1;           /* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
        cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
        cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
        cpu_data[cpuid].need_new_asn = 0;
        cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
        cpu_data[cpuid].prof_counter = 1;
        cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
        unsigned long stop = jiffies + 10*HZ;

        while (time_before(jiffies, stop)) {
                if (!smp_secondary_alive)
                        return;
                barrier();
        }

        printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
        for (;;)
                barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpu_test_and_set(cpuid, cpu_online_map)) {
                printk("??, cpu 0x%x already present??\n", cpuid);
                BUG();
        }

        /* Turn on machine checks.  */
        wrmces(7);

        /* Set trap vectors.  */
        trap_init();

        /* Set interrupt vector.  */
        wrent(entInt, 0);

        /* Get our local ticker going. */
        smp_setup_percpu_timer(cpuid);

        /* Call platform-specific callin, if specified */
        if (alpha_mv.smp_callin) alpha_mv.smp_callin();

        /* All kernel threads share the same mm context.  */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* inform the notifiers about the new cpu */
        notify_cpu_starting(cpuid);

        /* Must have completely accurate bogos.  */
        local_irq_enable();

        /* Wait for the boot CPU to stop, with irqs enabled, before
           running calibrate_delay.  */
        wait_boot_cpu_to_stop(cpuid);
        mb();
        calibrate_delay();

        smp_store_cpu_info(cpuid);
        /* Allow the master to continue only after we've written
           loops_per_jiffy.  */
        wmb();
        smp_secondary_alive = 1;

        DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
              cpuid, current, current->active_mm));

        /* Do nothing.  */
        cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
        unsigned long timeout;

        if (!(hwrpb->txrdy & cpumask))
                return 0;

        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (!(hwrpb->txrdy & cpumask))
                        return 0;
                udelay(10);
                barrier();
        }

        return -1;
}

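/* Console message passing goes through the HWRPB: each CPU slot carries
   an ipc_buffer, and hwrpb->rxrdy/txrdy hold one "message pending" bit
   per CPU (rxrdy for messages sent to a secondary's console, txrdy for
   messages coming back from one).  */
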
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
        struct percpu_struct *cpu;
        register char *cp1, *cp2;
        unsigned long cpumask;
        size_t len;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);

        cpumask = (1UL << cpuid);
        if (wait_for_txrdy(cpumask))
                goto timeout;

        cp2 = str;
        len = strlen(cp2);
        *(unsigned int *)&cpu->ipc_buffer[0] = len;
        cp1 = (char *) &cpu->ipc_buffer[1];
        memcpy(cp1, cp2, len);

        /* atomic test and set */
        wmb();
        set_bit(cpuid, &hwrpb->rxrdy);

        if (wait_for_txrdy(cpumask))
                goto timeout;
        return;

 timeout:
        printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
        int mycpu, i, cnt;
        unsigned long txrdy = hwrpb->txrdy;
        char *cp1, *cp2, buf[80];
        struct percpu_struct *cpu;

        DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

        mycpu = hard_smp_processor_id();

        for (i = 0; i < NR_CPUS; i++) {
                if (!(txrdy & (1UL << i)))
                        continue;

                DBGS(("recv_secondary_console_msg: "
                      "TXRDY contains CPU %d.\n", i));

                cpu = (struct percpu_struct *)
                  ((char*)hwrpb
                   + hwrpb->processor_offset
                   + i * hwrpb->processor_size);

                DBGS(("recv_secondary_console_msg: on %d from %d"
                      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
                      mycpu, i, cpu->halt_reason, cpu->flags));

                cnt = cpu->ipc_buffer[0] >> 32;
                if (cnt <= 0 || cnt >= 80)
                        strcpy(buf, "<<< BOGUS MSG >>>");
                else {
                        cp1 = (char *) &cpu->ipc_buffer[11];
                        cp2 = buf;
                        strcpy(cp2, cp1);

                        while ((cp2 = strchr(cp2, '\r')) != 0) {
                                *cp2 = ' ';
                                if (cp2[1] == '\n')
                                        cp2[1] = ' ';
                        }
                }

                DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
                      "message is '%s'\n", mycpu, buf));
        }

        hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
        struct percpu_struct *cpu;
        struct pcb_struct *hwpcb, *ipcb;
        unsigned long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;
        ipcb = &task_thread_info(idle)->pcb;

        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
           to the target idle task's pcb.  Reuse the stack in the mean
           time.  Precalculate the target PCBB.  */
        hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
        hwpcb->usp = 0;
        hwpcb->ptbr = ipcb->ptbr;
        hwpcb->pcc = 0;
        hwpcb->asn = 0;
        hwpcb->unique = virt_to_phys(ipcb);
        hwpcb->flags = ipcb->flags;
        hwpcb->res1 = hwpcb->res2 = 0;

#if 0
        DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
              hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
        DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
              cpuid, idle->state, ipcb->flags));

        /* Setup HWRPB fields that SRM uses to activate secondary CPU */
        hwrpb->CPU_restart = __smp_callin;
        hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

        /* Recalculate and update the HWRPB checksum */
        hwrpb_update_checksum(hwrpb);

        /*
         * Send a "start" command to the specified processor.
         */

        /* SRM III 3.4.1.3 */
        cpu->flags |= 0x22;     /* turn on Context Valid and Restart Capable */
        cpu->flags &= ~1;       /* turn off Bootstrap In Progress */
        wmb();

        send_secondary_console_msg("START\r\n", cpuid);

        /* Wait 10 seconds for an ACK from the console.  */
        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu->flags & 1)
                        goto started;
                udelay(10);
                barrier();
        }
        printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
        return -1;

 started:
        DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
        return 0;
}

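/* The sequence for bringing one secondary up, implemented below together
   with smp_callin() above: fork an idle task, set smp_secondary_alive to
   -1, have the console start the CPU at __smp_callin, write 0 to release
   it into calibrate_delay(), then wait up to a second for it to report 1.  */
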
/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
        struct task_struct *idle;
        unsigned long timeout;

        /* Cook up an idler for this guy.  Note that the address we
           give to kernel_thread is irrelevant -- it's going to start
           where HWRPB.CPU_restart says to start.  But this gets all
           the other task-y sort of data structures set up like we
           wish.  We can't use kernel_thread since we must avoid
           rescheduling the child.  */
        idle = fork_idle(cpuid);
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpuid);

        DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
              cpuid, idle->state, idle->flags));

        /* Signal the secondary to wait a moment.  */
        smp_secondary_alive = -1;

        /* Whirrr, whirrr, whirrrrrrrrr... */
        if (secondary_cpu_start(cpuid, idle))
                return -1;

        /* Notify the secondary CPU it can run calibrate_delay.  */
        mb();
        smp_secondary_alive = 0;

        /* We've been acked by the console; wait one second for
           the task to start up for real.  */
        timeout = jiffies + 1*HZ;
        while (time_before(jiffies, timeout)) {
                if (smp_secondary_alive == 1)
                        goto alive;
                udelay(10);
                barrier();
        }

        /* We failed to boot the CPU.  */

        printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
        return -1;

 alive:
        /* Another "Red Snapper". */
        return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
        struct percpu_struct *cpubase, *cpu;
        unsigned long i;

        if (boot_cpuid != 0) {
                printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
                       boot_cpuid);
        }

        if (hwrpb->nr_processors > 1) {
                int boot_cpu_palrev;

                DBGS(("setup_smp: nr_processors %ld\n",
                      hwrpb->nr_processors));

                cpubase = (struct percpu_struct *)
                        ((char*)hwrpb + hwrpb->processor_offset);
                boot_cpu_palrev = cpubase->pal_revision;

                for (i = 0; i < hwrpb->nr_processors; i++) {
                        cpu = (struct percpu_struct *)
                                ((char *)cpubase + i*hwrpb->processor_size);
                        /* 0x1cc selects the HWRPB "processor available,
                           present, PALcode valid/loaded" flag bits.  */
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
                                cpu_set(i, cpu_present_map);
                                cpu->pal_revision = boot_cpu_palrev;
                        }

                        DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
                              i, cpu->flags, cpu->type));
                        DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
                              i, cpu->pal_revision));
                }
        } else {
                smp_num_probed = 1;
        }

        printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
               smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
        /* Take care of some initial bookkeeping.  */
        memset(ipi_data, 0, sizeof(ipi_data));

        current_thread_info()->cpu = boot_cpuid;

        smp_store_cpu_info(boot_cpuid);
        smp_setup_percpu_timer(boot_cpuid);

        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
                cpu_present_map = cpumask_of_cpu(boot_cpuid);
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

        printk(KERN_INFO "SMP starting up secondaries.\n");

        smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
        smp_boot_one_cpu(cpu);

        return cpu_online(cpu) ? 0 : -ENOSYS;
}

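/* Sum each online CPU's loops_per_jiffy and report the total as
   BogoMIPS, i.e. loops_per_jiffy * HZ / 500000, printed to two decimal
   places; the +2500 provides rounding.  */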
void __init
smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online(cpu))
                        bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               (bogosum + 2500) / (500000/HZ),
               ((bogosum + 2500) / (5000/HZ)) % 100);
}

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int cpu = smp_processor_id();
        unsigned long user = user_mode(regs);
        struct cpuinfo_alpha *data = &cpu_data[cpu];

        old_regs = set_irq_regs(regs);

        /* Record kernel PC.  */
        profile_tick(CPU_PROFILING);

        if (!--data->prof_counter) {
                /* We need to make like a normal interrupt -- otherwise
                   timer interrupts ignore the global interrupt lock,
                   which would be a Bad Thing.  */
                irq_enter();

                update_process_times(user);

                data->prof_counter = data->prof_multiplier;

                irq_exit();
        }
        set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

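/* In send_ipi_message() the first mb() orders the caller's data against
   setting the message bits, and the second orders the bits against the
   wripir() interrupts; handle_ipi() pairs with these via its own mb()
   calls around the xchg().  */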
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
        int i;

        mb();
        for_each_cpu_mask(i, to_whom)
                set_bit(operation, &ipi_data[i].bits);

        mb();
        for_each_cpu_mask(i, to_whom)
                wripir(i);
}

void
handle_ipi(struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
        unsigned long ops;

#if 0
        DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
              this_cpu, *pending_ipis, regs->pc));
#endif

        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
          mb(); /* Order bit clearing and data access. */
          do {
                unsigned long which;

                /* Isolate the lowest pending message bit, clear it
                   from ops, and convert it to a message number.  */
                which = ops & -ops;
                ops &= ~which;
                which = __ffs(which);

                switch (which) {
                case IPI_RESCHEDULE:
                        /* Reschedule callback.  Everything to be done
                           is done by the interrupt return path.  */
                        break;

                case IPI_CALL_FUNC:
                        generic_smp_call_function_interrupt();
                        break;

                case IPI_CALL_FUNC_SINGLE:
                        generic_smp_call_function_single_interrupt();
                        break;

                case IPI_CPU_STOP:
                        halt();

                default:
                        printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                               this_cpu, which);
                        break;
                }
          } while (ops);

          mb(); /* Order data access and bit testing. */
        }

        cpu_data[this_cpu].ipi_count++;

        if (hwrpb->txrdy)
                recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
        if (cpu == hard_smp_processor_id())
                printk(KERN_WARNING
                       "smp_send_reschedule: Sending IPI to self.\n");
#endif
        send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
        cpumask_t to_whom = cpu_possible_map;
        cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
                printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
        send_ipi_message(to_whom, IPI_CPU_STOP);
}

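/* These two hook the generic kernel/smp.c cross-call code up to the
   Alpha IPI mechanism above.  */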
void arch_send_call_function_ipi(cpumask_t mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}

static void
ipi_imb(void *ignored)
{
        imb();
}

void
smp_imb(void)
{
        /* Must wait for other processors to flush their icache
           before continuing.  */
        if (on_each_cpu(ipi_imb, NULL, 1))
                printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
        tbia();
}

void
flush_tlb_all(void)
{
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
                printk(KERN_CRIT "flush_tlb_all: timed out\n");
        }
}

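/* A CPU sets asn_lock in its cpu_data while its address-space-number
   state is being updated; the IPI handlers below test it and fall back
   to flush_tlb_other() rather than flushing through the current
   context.  */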
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current(mm);
        else
                flush_tlb_other(mm);
}

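/* A shortcut used by each flush routine below: when we are the only
   user of the mm there is no need to IPI; zeroing mm->context[] for the
   other CPUs is enough, as they will allocate a fresh ASN if they ever
   activate the mm again.  */
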
void
flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
        struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
        struct mm_struct *mm = data->mm;

        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current_page(mm, data->vma, data->addr);
        else
                flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_tlb_page_struct data;
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current_page(mm, vma, addr);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        data.vma = vma;
        data.mm = mm;
        data.addr = addr;

        if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        /* On the Alpha we always flush the whole user tlb.  */
        flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                __load_new_mm_context(mm);
        else
                flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                        unsigned long addr, int len)
{
        struct mm_struct *mm = vma->vm_mm;

        if ((vma->vm_flags & VM_EXEC) == 0)
                return;

        preempt_disable();

        if (mm == current->active_mm) {
                __load_new_mm_context(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
                printk(KERN_CRIT "flush_icache_page: timed out\n");
        }

        preempt_enable();
}