/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *	Modified RID allocation for SMP
 * Goutham Rao <goutham.rao@intel.com>
 *	IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 * Copyright (C) 2007 Intel Corp
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/sal.h>
static struct {
	unsigned long mask;	/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;
struct ia64_ctx ia64_ctx = {
	.lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next = 1,
	.max_ctx = ~0U
};
DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has setup ia64_ctx.max_ctx based on
 * the maximum RID that is supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
	ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
}
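/*
 * Sizing sketch (illustrative): each bitmap holds one bit per context,
 * i.e. max_ctx+1 bits, and (max_ctx+1)>>3 converts bits to bytes for
 * alloc_bootmem().  Assuming an 18-bit RID space for the sake of the
 * example, max_ctx = 2^18 - 1 and each bitmap occupies 2^18/8 = 32 KB.
 */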
/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}

	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}
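/*
 * Worked example of the flushmap handoff above (illustrative): if
 * flushmap[i] held 0x5, contexts 64*i and 64*i+2 were freed since the
 * last wrap.  The xchg() claims both bits atomically and the xor
 * clears them in bitmap[i], so those RIDs become allocatable again
 * once every cpu has flushed its TLB.
 */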
/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
	atomic_t cur;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
	atomic_set(&ss->cur, val);
}

static inline void down_spin(struct spinaphore *ss)
{
	/* decrement unless zero; while zero, spin without the atomic op */
	while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
		while (atomic_read(&ss->cur) == 0)
			cpu_relax();
}

static inline void up_spin(struct spinaphore *ss)
{
	atomic_add(1, &ss->cur);
}
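/*
 * Usage sketch (illustrative, mirroring ia64_global_tlb_purge() below):
 * a spinaphore initialized to N admits at most N cpus at a time:
 *
 *	spinaphore_init(&sem, N);
 *	...
 *	down_spin(&sem);	(spins while N holders are inside)
 *	... issue ptc.g / ptc.ga ...
 *	up_spin(&sem);
 */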
static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;
/*
 * Kernel parameter "nptcg=" overrides the max number of concurrent global
 * TLB purges, which is otherwise reported from either PAL or SAL PALO.
 *
 * There is no sanity checking of the nptcg value; it is the user's
 * responsibility to supply a valid nptcg value for the platform.
 * Otherwise, the kernel may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);
	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

	return 1;
}

__setup("nptcg=", set_nptcg);
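/*
 * Example (illustrative): booting with "nptcg=2" on the kernel command
 * line caps concurrent global TLB purges at two, overriding whatever
 * PAL or the PALO table report.
 */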
/*
 * The maximum number of simultaneous ptc.g purges in the system can
 * be defined by PAL_VM_SUMMARY (in which case we should take
 * the smallest value for any cpu in the system) or by the PAL
 * override table (in which case we should ignore the value from
 * PAL_VM_SUMMARY).
 *
 * Kernel parameter "nptcg=" overrides the maximum number of simultaneous
 * ptc.g purges defined in either PAL_VM_SUMMARY or the PAL override table.
 * In this case, we should ignore the value from both of those sources.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully setup until we start bringing cpus online.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
	static int kp_override;
	static int palo_override;
	static int firstcpu = 1;

	if (toolatetochangeptcgsem) {
		BUG_ON(max_purges < nptcg);
		return;
	}

	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
		kp_override = 1;
		nptcg = max_purges;
		goto resetsema;
	}
	if (kp_override) {
		need_ptcg_sem = num_possible_cpus() > nptcg;
		return;
	}

	if (nptcg_from == NPTCG_FROM_PALO) {
		palo_override = 1;

		/* In PALO max_purges == 0 really means it! */
		if (max_purges == 0)
			panic("Whoa! Platform does not support global TLB purges.\n");
		nptcg = max_purges;
		if (nptcg == PALO_MAX_TLB_PURGES) {
			need_ptcg_sem = 0;
			return;
		}
		goto resetsema;
	}
	if (palo_override) {
		if (nptcg != PALO_MAX_TLB_PURGES)
			need_ptcg_sem = (num_possible_cpus() > nptcg);
		return;
	}

	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
	if (max_purges == 0)
		max_purges = 1;
	if (firstcpu) {
		nptcg = max_purges;
		firstcpu = 0;
	}
	if (max_purges < nptcg)
		nptcg = max_purges;
	if (nptcg == PAL_MAX_PURGES) {
		need_ptcg_sem = 0;
		return;
	} else
		need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
	spinaphore_init(&ptcg_sem, max_purges);
}
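/*
 * Precedence summary for the above (illustrative):
 *
 *	"nptcg=" kernel parameter - always wins once set
 *	PALO override table       - next; 0 means no global purge support
 *	PAL_VM_SUMMARY            - lowest; minimum over all cpus, 0 means 1
 *
 * need_ptcg_sem is cleared when the limit can never be exceeded, i.e.
 * when the platform reports "unlimited" (PALO_MAX_TLB_PURGES /
 * PAL_MAX_PURGES) or num_possible_cpus() <= nptcg.
 */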
void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	struct mm_struct *active_mm = current->active_mm;

	toolatetochangeptcgsem = 1;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	if (need_ptcg_sem)
		down_spin(&ptcg_sem);

	do {
		/*
		 * Flush ALAT entries also.
		 */
		ia64_ptcga(start, (nbits << 2));
		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);

	if (need_ptcg_sem)
		up_spin(&ptcg_sem);

	if (mm != active_mm) {
		activate_context(active_mm);
	}
}
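/*
 * Worked example (illustrative): purging a 64KB range with a 16KB
 * purge page size (nbits == 14) takes four passes through the ptc.ga
 * loop above; start advances by 1UL << 14 each pass until it reaches
 * end.
 */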
void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
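/*
 * Geometry sketch (illustrative, with hypothetical numbers): PAL
 * describes the ptc.e sweep as a count0 x count1 grid of addresses.
 * With, say, count0 = 1, count1 = 8 and stride1 = 2^18, eight ptc.e
 * operations 256KB apart would flush the whole local TLB; the actual
 * values are cpu model specific and come from ia64_get_ptce().
 */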
void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
		platform_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif

	do {
		ia64_ptcl(start, (nbits<<2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);
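/*
 * Worked example (illustrative): for a 20KB (0x5000 byte) range,
 * size + 0xfff = 0x5fff, whose most significant set bit is bit 14, so
 * nbits starts at 14 (a 16KB purge page).  If 16KB is not set in
 * purge.mask, nbits is bumped to the next supported size, and start is
 * rounded down to that alignment before the ptc.l loop runs.
 */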
void __devinit
ia64_tlb_init (void)
{
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	unsigned long tr_pgbits;
	long status;

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke left overs from bootstrapping... */
}
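/*
 * Decoding the fallback mask above (illustrative): 0x115557000UL has
 * bits 12, 13, 14, 16, 18, 20, 22, 24, 26, 28 and 32 set, i.e. the
 * architected purge page-sizes 4K, 8K, 16K, 64K, 256K, 1M, 4M, 16M,
 * 64M, 256M and 4G.
 */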