/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

/* Bit in the first HPTE dword used as a software lock on the entry */
#define HPTE_LOCK_BIT 3

/* Serializes tlbie on hardware that deadlocks on concurrent tlbies */
static DEFINE_SPINLOCK(native_tlbie_lock);
static inline void __tlbie(unsigned long va, unsigned int psize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		asm volatile("tlbie %0,0" : : "r" (va) : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		asm volatile("tlbie %0,1" : : "r" (va) : "memory");
		break;
	}
}

static inline void __tlbiel(unsigned long va, unsigned int psize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	/* 0x7c000224 is tlbiel encoded by hand, for toolchains that
	 * do not know the instruction */
	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

static inline void tlbie(unsigned long va, int psize, int local)
{
	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(va, psize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(va, psize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		spin_unlock(&native_tlbie_lock);
}
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
			break;
		while(test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	asm volatile("lwsync":::"memory");
	clear_bit(HPTE_LOCK_BIT, word);
}
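
/*
 * Usage sketch (illustrative only): the update and invalidate paths below
 * all take the per-HPTE lock, re-check the entry, then either modify it or
 * back off, roughly:
 *
 *	native_lock_hpte(hptep);
 *	if (!HPTE_V_COMPARE(hptep->v, want_v) || !(hptep->v & HPTE_V_VALID))
 *		native_unlock_hpte(hptep);	(entry changed, back off)
 *	else
 *		hptep->v = 0;			(clearing dword 0 also clears
 *						 HPTE_LOCK_BIT, i.e. unlocks)
 */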
static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, va=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, va, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
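
/*
 * Usage sketch (illustrative, simplified): the generic hash MMU code drives
 * this via ppc_md.hpte_insert, trying the primary group, then the secondary
 * group with HPTE_V_SECONDARY set, and only falling back to hpte_remove to
 * evict a random non-bolted entry when both groups are full, roughly:
 *
 *	slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, psize);
 *	if (slot == -1)
 *		slot = ppc_md.hpte_insert(hpte_group_2, va, pa, rflags,
 *					  HPTE_V_SECONDARY, psize);
 *	if (slot == -1) {
 *		ppc_md.hpte_remove(hpte_group);
 *		(then retry the insert)
 *	}
 *
 * (hpte_group_2 above is just a name for the secondary group's first slot.)
 */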
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long va, int psize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_v(va, psize);

	DBG_LOW("    update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
		va, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, local);

	return ret;
}
static long native_hpte_find(unsigned long va, int psize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i, j;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(va, mmu_psize_defs[psize].shift);
	want_v = hpte_encode_v(va, psize);

	/* check the primary group (j == 0), then the secondary (j == 1) */
	for (j = 0; j < 2; j++) {
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		for (i = 0; i < HPTES_PER_GROUP; i++) {
			hptep = htab_address + slot;
			hpte_v = hptep->v;

			if (HPTE_V_COMPARE(hpte_v, want_v)
			    && (hpte_v & HPTE_V_VALID)
			    && ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
				/* HPTE matches */
				if (j)
					slot = -slot;
				return slot;
			}
			++slot;
		}
		hash = ~hash;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize)
{
	unsigned long vsid, va;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea);
	va = (vsid << 28) | (ea & 0x0fffffff);

	slot = native_hpte_find(va, psize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, 0);
}
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
				   int psize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(va=%016lx, hash: %x)\n", va, slot);

	want_v = hpte_encode_v(va, psize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(va, psize, local);

	local_irq_restore(flags);
}
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, unsigned long *va)
{
	unsigned long hpte_r = hpte->r;
	unsigned long hpte_v = hpte->v;
	unsigned long avpn;
	int i, size, shift, penc;

	if (!(hpte_v & HPTE_V_LARGE))
		size = MMU_PAGE_4K;
	else {
		for (i = 0; i < LP_BITS; i++) {
			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
				break;
		}
		penc = LP_MASK(i+1) >> LP_SHIFT;
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* 4K pages are not represented by LP */
			if (size == MMU_PAGE_4K)
				continue;

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			if (penc == mmu_psize_defs[size].penc)
				break;
		}
	}

	/* This works for all page sizes, and for 256M and 1T segments */
	shift = mmu_psize_defs[size].shift;
	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;

	if (shift < 23) {
		unsigned long vpi, vsid, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;
		switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
		case MMU_SEGSIZE_256M:
			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
			break;
		case MMU_SEGSIZE_1T:
			vsid = avpn >> 40;
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			break;
		default:
			/* unsupported segment size */
			avpn = vpi = size = 0;
		}
		avpn |= (vpi << mmu_psize_defs[size].shift);
	}

	*va = avpn;
	*psize = size;
}
/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v, va;
	unsigned long pteg_count;
	int psize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running,  right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &va);
			hptep->v = 0;
			__tlbie(va, psize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}
/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long va, hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		va = batch->vaddr[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
			hash = hpt_hash(va, shift);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_v(va, psize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (cpu_has_feature(CPU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbiel(va, psize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbie(va, psize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
#ifdef CONFIG_PPC_PSERIES
/* Disable TLB batching on nighthawk */
static inline int tlb_batching_enabled(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int enabled = 1;

	if (root) {
		const char *model = of_get_property(root, "model", NULL);
		if (model && !strcmp(model, "IBM,9076-N81"))
			enabled = 0;
		of_node_put(root);
	}

	return enabled;
}
#else
static inline int tlb_batching_enabled(void)
{
	return 1;
}
#endif

void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	if (tlb_batching_enabled())
		ppc_md.flush_hash_range = native_flush_hash_range;
}
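
/*
 * Usage sketch (illustrative, simplified): nothing in this file is called
 * directly; the generic hash MMU code goes through the ppc_md vector filled
 * in above, along the lines of:
 *
 *	hpte_init_native();
 *	...
 *	ppc_md.hpte_invalidate(slot, va, psize, local);
 *	if (ppc_md.flush_hash_range)
 *		ppc_md.flush_hash_range(number, local);
 */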