/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG
#undef DEBUG_LOW

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)
/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * htab_initialize is called with the MMU off (of course), but
 * the kernel has been copied down to zero so it can directly
 * reference global data.  At this point it is very difficult
 * to print debug info.
 */
#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */
static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

hpte_t *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
/* Definitions of the page size arrays to be used when the firmware
 * does not provide one.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel	= 0,
	},
};
int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long mode, int psize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	unsigned long tmp_mode;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long vpn, hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr);
		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);

		vpn = va >> shift;
		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(vaddr))
			tmp_mode = mode | HPTE_R_N;

		hash = hpt_hash(va, shift);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
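		/*
		 * Illustration: hpt_hash() folds the VSID with the page
		 * index within the segment; "hash & htab_hash_mask" then
		 * selects one of pteg_count groups (e.g. with 2^17 PTEGs,
		 * htab_hash_mask = 0x1ffff), and multiplying by
		 * HPTES_PER_GROUP (8) yields the slot of the group's
		 * first HPTE.
		 */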
		/* The crap below can be cleaned up once ppc_md.probe() can
		 * set up the hash callbacks, so that we can just use the
		 * normal insert callback here.
		 */
#ifdef CONFIG_PPC_ISERIES
		if (_machine == PLATFORM_ISERIES_LPAR)
			ret = iSeries_hpte_insert(hpteg, va,
						  __pa(vaddr),
						  tmp_mode,
						  HPTE_V_BOLTED,
						  psize);
		else
#endif
#ifdef CONFIG_PPC_PSERIES
		if (_machine & PLATFORM_LPAR)
			ret = pSeries_lpar_hpte_insert(hpteg, va,
						       __pa(vaddr),
						       tmp_mode,
						       HPTE_V_BOLTED,
						       psize);
		else
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
			ret = native_hpte_insert(hpteg, va,
						 __pa(vaddr),
						 tmp_mode, HPTE_V_BOLTED,
						 psize);
#endif
		if (ret < 0)
			break;
	}
	return ret < 0 ? ret : 0;
}
static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
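	/*
	 * Property layout assumed by the decoding below (per the PAPR
	 * definition of "ibm,segment-page-sizes"): for each base segment
	 * page size, the cells are
	 *   { base page shift, SLB encoding, N,
	 *     N x { actual page shift, HPTE encoding } }
	 */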
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while (size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while (size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}

			/* Map the segment page shift to our psize index */
			switch (shift) {
			case 0xc:  idx = MMU_PAGE_4K;  break;
			case 0x10: idx = MMU_PAGE_64K; break;
			case 0x14: idx = MMU_PAGE_1M;  break;
			case 0x18: idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22: idx = MMU_PAGE_16G; break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}
static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fallback on known size
	 * list for 16M capable GP & GR
	 */
	if ((_machine != PLATFORM_ISERIES_LPAR) &&
	    cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;

	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K if cache inhibited large pages are supported by the
	 * processor
	 */
#ifdef CONFIG_PPC_64K_PAGES
	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
	    cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
		mmu_virtual_psize = MMU_PAGE_64K;
#endif /* CONFIG_PPC_64K_PAGES */
	printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift);
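	/* Example (illustrative): with a 16M linear mapping and 4K pages
	 * elsewhere, this prints
	 * "Page orders: linear mapping = 24, others = 12".
	 */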
#ifdef CONFIG_HUGETLB_PAGE
	/* Init large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_huge_psize = MMU_PAGE_16M;
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_huge_psize = MMU_PAGE_1M;

	/* Calculate HPAGE_SHIFT and sanity check it */
	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
	else
		HPAGE_SHIFT = 0; /* No huge pages dude ! */
#endif /* CONFIG_HUGETLB_PAGE */
}
static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}
static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}
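/*
 * Worked example of the sizing above: a PTEG is 8 HPTEs of 16 bytes each,
 * hence "pteg_count << 7". For 1GB of RAM, rnd_mem_size = 2^30 and
 * pteg_count = max(2^30 >> 13, 2^11) = 2^17, giving a 2^17 * 128 = 16MB
 * hash table -- one PTEG per two 4K pages, as intended.
 */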
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
		_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
		mmu_linear_psize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */
void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0;
	int i;

	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/* Initialize page sizes */
	htab_init_page_sizes();

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;
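		/*
		 * Illustration of the encoding above: the table is
		 * naturally aligned (allocated with size == alignment),
		 * so its low bits are clear and the HTABSIZE field can be
		 * added into them. HTABSIZE is log2(pteg_count) - 11
		 * because the minimum table is 2^11 PTEGs (256KB); a 16MB
		 * table (2^17 PTEGs) encodes as 6.
		 */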
		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}
	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for (i = 0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							 __pa(base), mode_rw,
							 mmu_linear_psize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase + 16 * MB,
							 base + size,
							 __pa(dart_table_end),
							 mode_rw,
							 mmu_linear_psize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
					 mode_rw, mmu_linear_psize));
	}
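	/* With a 16M linear page size, a 1GB lmb region above bolts only
	 * 1GB / 16MB = 64 HPTEs; with 4K pages it would take 256K entries,
	 * which is why the largest available linear page size is preferred.
	 */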
	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), mode_rw,
					 mmu_linear_psize));
	}

	DBG(" <- htab_initialize()\n");
}
#undef KB
#undef MB
void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}
/*
 * Called by asm hashtable.S to do a lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			/* instruction access: flush and mark clean */
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}
/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	pte_t *ptep;
	cpumask_t tmp;
	struct mm_struct *mm;
	unsigned long vsid;
	int rc, user_region = 0, local = 0;

	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx)\n",
		ea, access, trap);

	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
		DBG_LOW(" out of pgtable range !\n");
		return 1;
	}

	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (!mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
		vsid = get_vsid(mm->context.id, ea);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea);
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

	/* Handle hugepage regions */
	if (unlikely(in_hugepage_area(mm->context, ea))) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path)
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifndef CONFIG_PPC_64K_PAGES
	rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_virtual_psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;

	/* We don't want huge pages prefaulted for now
	 */
	if (unlikely(in_hugepage_area(mm->context, ea)))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx)\n", mm, mm->pgd, ea, access, trap);

	/* Get PTE, VSID, access mask */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;
	vsid = get_vsid(mm->context.id, ea);

	/* Hash it in */
	local_irq_save(flags);
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;
#ifndef CONFIG_PPC_64K_PAGES
	__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_virtual_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
	local_irq_restore(flags);
}
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016lx)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: slot=%lx, hidx=%lx\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, local);
	} pte_iterate_hashed_end();
}
void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, local);
	}
}
static inline void make_bl(unsigned int *insn_addr, void *func)
{
	/* func points at a function descriptor; its first doubleword
	 * is the actual entry point */
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + 4);
}
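/*
 * Note on the encoding above: "bl" is primary opcode 18 (0x48000000)
 * with the link bit LK=1, giving 0x48000001. The 24-bit signed word
 * displacement occupies bits 6-29, hence masking the byte offset with
 * 0x03fffffc; the patched call can only reach targets within +/- 32MB
 * of the call site.
 */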
/*
 * low_hash_fault is called when the low-level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}
void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_64K_PAGES
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_64K_PAGES */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}
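/*
 * Patching direct "bl" instructions here, rather than calling through
 * the ppc_md function pointers, keeps the hot hash-miss path in
 * hashtable.S free of indirect branches; htab_finish_init() runs once
 * at boot, after the platform probe code has filled in ppc_md.
 */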