/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/spu.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define KB (1024)
#define MB (1024*KB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * htab_initialize is called with the MMU off (of course), but
 * the kernel has been copied down to zero so it can directly
 * reference global data.  At this point it is very difficult
 * to print debug info.
 */

#ifdef CONFIG_U3_DART
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */

static unsigned long _SDR1;
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */

/* These are the page size arrays to be used when none is provided
 * by the firmware.
 */

/* Pre-POWER4 CPUs (4k pages only)
 */
struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 0,
	},
};

/* POWER4, GPUL, POWER5
 *
 * Support for 16Mb large pages
 */
struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc	= 0,
		.avpnm	= 0,
		.tlbiel	= 1,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc	= 0,
		.avpnm	= 0x1UL,
		.tlbiel	= 0,
	},
};

int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long mode,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	unsigned long tmp_mode;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long va = hpt_va(vaddr, vsid, ssize);

		tmp_mode = mode;

		/* Make non-kernel text non-executable */
		if (!in_kernel_text(vaddr))
			tmp_mode = mode | HPTE_R_N;

		hash = hpt_hash(va, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		DBG("htab_bolt_mapping: calling %p\n", ppc_md.hpte_insert);

		BUG_ON(!ppc_md.hpte_insert);
		ret = ppc_md.hpte_insert(hpteg, va, paddr,
				tmp_mode, HPTE_V_BOLTED, psize, ssize);

		if (ret < 0)
			break;
#ifdef CONFIG_DEBUG_PAGEALLOC
		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}

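/*
 * Rough sketch of what one loop iteration above computes (values
 * assumed: 16M linear pages, HPTES_PER_GROUP == 8):
 *
 *	va    = hpt_va(vaddr, vsid, ssize);	// full virtual address
 *	hash  = hpt_hash(va, 24, ssize);	// per-page hash value
 *	hpteg = (hash & htab_hash_mask) * 8;	// first slot of the
 *						// primary PTEG
 *
 * HPTE_V_BOLTED marks the entry so the low-level insertion code
 * never evicts it when a group fills up.
 */
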
static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					  &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (prop[0] == 40) {
			DBG("1T segment support detected\n");
			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
	return 0;
}

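/*
 * Note on the property format (paraphrased from the decoding above):
 * "ibm,processor-segment-sizes" is a list of 32-bit cells, each the
 * log2 of a supported segment size, so 0x1c (28) means 256M segments
 * and 0x28 (40, the value tested above) means 1T segments.
 */
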
static void __init htab_init_seg_sizes(void)
{
	of_scan_flat_dt(htab_dt_scan_seg_sizes, NULL);
}

static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;
	unsigned long size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node,
					  "ibm,segment-page-sizes", &size);
	if (prop != NULL) {
		DBG("Page sizes from device-tree:\n");
		size /= 4;
		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
		while(size > 0) {
			unsigned int shift = prop[0];
			unsigned int slbenc = prop[1];
			unsigned int lpnum = prop[2];
			unsigned int lpenc = 0;
			struct mmu_psize_def *def;
			int idx = -1;

			size -= 3; prop += 3;
			while(size > 0 && lpnum) {
				if (prop[0] == shift)
					lpenc = prop[1];
				prop += 2; size -= 2;
				lpnum--;
			}
			switch(shift) {
			case 0xc:
				idx = MMU_PAGE_4K;
				break;
			case 0x10:
				idx = MMU_PAGE_64K;
				break;
			case 0x14:
				idx = MMU_PAGE_1M;
				break;
			case 0x18:
				idx = MMU_PAGE_16M;
				cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
				break;
			case 0x22:
				idx = MMU_PAGE_16G;
				break;
			}
			if (idx < 0)
				continue;
			def = &mmu_psize_defs[idx];
			def->shift = shift;
			if (shift <= 23)
				def->avpnm = 0;
			else
				def->avpnm = (1 << (shift - 23)) - 1;
			def->sllp = slbenc;
			def->penc = lpenc;
			/* We don't know for sure what's up with tlbiel, so
			 * for now we only set it for 4K and 64K pages
			 */
			if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
				def->tlbiel = 1;
			else
				def->tlbiel = 0;

			DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
			    "tlbiel=%d, penc=%d\n",
			    idx, shift, def->sllp, def->avpnm, def->tlbiel,
			    def->penc);
		}
		return 1;
	}
	return 0;
}

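/*
 * Layout assumed by the decoder above: "ibm,segment-page-sizes" is a
 * sequence of records, each a (shift, slb-encoding, count) triple
 * followed by count (shift, hpte-encoding) pairs giving the page
 * sizes usable in a segment of that base size.  For example, a
 * 64K-capable CPU might report the record
 *
 *	0x10 0x110 1  0x10 0x1
 *
 * i.e. 64K base pages with one subpage encoding (64K, penc 1).
 */
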
static void __init htab_init_page_sizes(void)
{
	int rc;

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc != 0)  /* Found */
		goto found;

	/*
	 * Not in the device-tree, let's fallback on known size
	 * list for 16M capable GP & GR
	 */
	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
 found:
#ifndef CONFIG_DEBUG_PAGEALLOC
	/*
	 * Pick a size for the linear mapping. Currently, we only support
	 * 16M, 1M and 4K which is the default
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		mmu_linear_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		mmu_linear_psize = MMU_PAGE_1M;
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
			mmu_io_psize = MMU_PAGE_64K;
		else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift);

#ifdef CONFIG_HUGETLB_PAGE
	/* Init large page size. Currently, we pick 16M or 1M depending
	 * on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		set_huge_psize(MMU_PAGE_16M);
	/* With 4k/4level pagetables, we can't (for now) cope with a
	 * huge page size < PMD_SIZE */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		set_huge_psize(MMU_PAGE_1M);
#endif /* CONFIG_HUGETLB_PAGE */
}

static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	u32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = prop[1];
		return 1;
	}
	return 0;
}

static unsigned long __init htab_get_table_size(void)
{
	unsigned long mem_size, rnd_mem_size, pteg_count;

	/* If hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	/* round mem_size up to next power of 2 */
	mem_size = lmb_phys_mem_size();
	rnd_mem_size = 1UL << __ilog2(mem_size);
	if (rnd_mem_size < mem_size)
		rnd_mem_size <<= 1;

	/* # pages / 2 */
	pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);

	return pteg_count << 7;
}

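/*
 * Worked example (numbers assumed): with 1GB of RAM, rnd_mem_size is
 * 2^30, so pteg_count = 2^30 >> 13 = 2^17 PTEGs, i.e. one PTEG (eight
 * 16-byte HPTEs, 128 bytes, hence the << 7) for every two 4K pages,
 * giving a 16MB hash table.
 */
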
#ifdef CONFIG_MEMORY_HOTPLUG
void create_section_mapping(unsigned long start, unsigned long end)
{
	BUG_ON(htab_bolt_mapping(start, end, __pa(start),
		_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
		mmu_linear_psize, mmu_kernel_ssize));
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static inline void make_bl(unsigned int *insn_addr, void *func)
{
	unsigned long funcp = *((unsigned long *)func);
	int offset = funcp - (unsigned long)insn_addr;

	*insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc));
	flush_icache_range((unsigned long)insn_addr, 4+
			   (unsigned long)insn_addr);
}

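/*
 * Encoding note: 0x48000001 is "bl" (primary opcode 18 with the LK
 * bit set so the callee can return); the 24-bit signed word offset
 * lives in the 0x03fffffc field.  func is dereferenced because a
 * 64-bit kernel function pointer points at a function descriptor
 * whose first doubleword is the actual entry address.
 */
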
static void __init htab_finish_init(void)
{
	extern unsigned int *htab_call_hpte_insert1;
	extern unsigned int *htab_call_hpte_insert2;
	extern unsigned int *htab_call_hpte_remove;
	extern unsigned int *htab_call_hpte_updatepp;

#ifdef CONFIG_PPC_HAS_HASH_64K
	extern unsigned int *ht64_call_hpte_insert1;
	extern unsigned int *ht64_call_hpte_insert2;
	extern unsigned int *ht64_call_hpte_remove;
	extern unsigned int *ht64_call_hpte_updatepp;

	make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
#endif /* CONFIG_PPC_HAS_HASH_64K */

	make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
	make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
	make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp);
}

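/*
 * Note: the htab_call_* / ht64_call_* symbols above are "bl"
 * placeholders inside the low-level hash handlers in hash_low_64.S;
 * patching them once here gives the asm a direct branch to the
 * platform's HPTE primitives, presumably to spare an indirect call
 * through ppc_md in the hot hash-fault path.
 */
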
void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0;
	int i;

	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/* Initialize segment sizes */
	htab_init_seg_sizes();

	/* Initialize page sizes */
	htab_init_page_sizes();

	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
		mmu_kernel_ssize = MMU_SEGSIZE_1T;
		mmu_highuser_ssize = MMU_SEGSIZE_1T;
		printk(KERN_INFO "Using 1TB segments\n");
	}

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0;
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;
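		/*
		 * SDR1 packs both values: the table's physical base in
		 * the high-order bits and, in the low bits, HTABSIZE =
		 * log2(pteg_count) - 11, the size encoding relative to
		 * the minimum table of 2^11 PTEGs (256KB).
		 */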
		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

#ifdef CONFIG_DEBUG_PAGEALLOC
	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
						    1, lmb.rmo_size));
	memset(linear_map_hash_slots, 0, linear_map_hash_count);
#endif /* CONFIG_DEBUG_PAGEALLOC */

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped
	 * non-cacheable later on
	 */

	/* create the bolted linear mapping in the hash table */
	for (i=0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							__pa(base), mode_rw,
							mmu_linear_psize,
							mmu_kernel_ssize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							base + size,
							__pa(dart_table_end),
							mode_rw,
							mmu_linear_psize,
							mmu_kernel_ssize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				mode_rw, mmu_linear_psize, mmu_kernel_ssize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), mode_rw,
					 mmu_linear_psize, mmu_kernel_ssize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}

#ifdef CONFIG_SMP
void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}
#endif /* CONFIG_SMP */

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
{
	struct page *page;

	if (!pfn_valid(pte_pfn(pte)))
		return pp;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			__flush_dcache_icache(page_address(page));
			set_bit(PG_arch_1, &page->flags);
		} else
			pp |= HPTE_R_N;
	}
	return pp;
}

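/*
 * PG_arch_1 serves here as a "dcache/icache coherent" flag: it is set
 * once the dcache has been flushed and the icache invalidated for the
 * page (0x400 being the instruction storage interrupt vector), and
 * execute permission is withheld (HPTE_R_N) until an actual
 * instruction fetch forces that flush.
 */
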
/*
 * Demote a segment to using 4k pages.
 * For now this makes the whole process use 4k pages.
 */
#ifdef CONFIG_PPC_64K_PAGES
static void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
{
	if (mm->context.user_psize == MMU_PAGE_4K)
		return;
	slice_set_user_psize(mm, MMU_PAGE_4K);
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
}
#endif /* CONFIG_PPC_64K_PAGES */

/* Result code is:
 *  0 - handled
 *  1 - normal page fault
 * -1 - critical hash insertion error
 */
int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
{
	void *pgdir;
	unsigned long vsid;
	struct mm_struct *mm;
	pte_t *ptep;
	cpumask_t tmp;
	int rc, user_region = 0, local = 0;
	int psize, ssize;

666 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
669 if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
670 DBG_LOW(" out of pgtable range !\n");
	/* Get region & vsid */
	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
		user_region = 1;
		mm = current->mm;
		if (! mm) {
			DBG_LOW(" user region with no mm !\n");
			return 1;
		}
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		ssize = user_segment_size(ea);
		vsid = get_vsid(mm->context.id, ea, ssize);
		break;
	case VMALLOC_REGION_ID:
		mm = &init_mm;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		break;
	default:
		/* Not a valid range
		 * Send the problem up to do_page_fault
		 */
		return 1;
	}
	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);

	/* Get pgdir */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return 1;

	/* Check CPU locality */
	tmp = cpumask_of_cpu(smp_processor_id());
	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
		local = 1;

#ifdef CONFIG_HUGETLB_PAGE
	/* Handle hugepage regions */
	if (HPAGE_SHIFT && psize == mmu_huge_psize) {
		DBG_LOW(" -> huge page !\n");
		return hash_huge_page(mm, access, ea, vsid, local, trap);
	}
#endif /* CONFIG_HUGETLB_PAGE */

#ifndef CONFIG_PPC_64K_PAGES
	/* If we use 4K pages and our psize is not 4K, then we are hitting
	 * a special driver mapping, and we need to align the address before
	 * we fetch the PTE
	 */
	if (psize != MMU_PAGE_4K)
		ea &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get PTE and page size from page tables */
	ptep = find_linux_pte(pgdir, ea);
	if (ptep == NULL || !pte_present(*ptep)) {
		DBG_LOW(" no PTE !\n");
		return 1;
	}

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif

	/* Pre-check access permissions (will be re-checked atomically
	 * in __hash_page_XX but this pre-check is a fast path
	 */
	if (access & ~pte_val(*ptep)) {
		DBG_LOW(" no access !\n");
		return 1;
	}

	/* Do actual hashing */
#ifdef CONFIG_PPC_64K_PAGES
	/* If _PAGE_4K_PFN is set, make sure this is a 4k segment */
	if (pte_val(*ptep) & _PAGE_4K_PFN) {
		demote_segment_4k(mm, ea);
		psize = MMU_PAGE_4K;
	}

	/* If this PTE is non-cacheable and we have restrictions on
	 * using non cacheable large pages, then we switch to 4k
	 */
	if (mmu_ci_restrictions && psize == MMU_PAGE_64K &&
	    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
		if (user_region) {
			demote_segment_4k(mm, ea);
			psize = MMU_PAGE_4K;
		} else if (ea < VMALLOC_END) {
			/*
			 * some driver did a non-cacheable mapping
			 * in vmalloc space, so switch vmalloc
			 * to 4k pages
			 */
			printk(KERN_ALERT "Reducing vmalloc segment "
			       "to 4kB pages because of "
			       "non-cacheable mapping\n");
			psize = mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPU_BASE
			spu_flush_all_slbs(mm);
#endif
		}
	}
	if (user_region) {
		if (psize != get_paca()->context.user_psize) {
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	} else if (get_paca()->vmalloc_sllp !=
		   mmu_psize_defs[mmu_vmalloc_psize].sllp) {
		get_paca()->vmalloc_sllp =
			mmu_psize_defs[mmu_vmalloc_psize].sllp;
		slb_vmalloc_update();
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_PPC_HAS_HASH_64K
	if (psize == MMU_PAGE_64K)
		rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);

#ifndef CONFIG_PPC_64K_PAGES
	DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
#else
	DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
		pte_val(*(ptep + PTRS_PER_PTE)));
#endif
	DBG_LOW(" -> rc=%d\n", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(hash_page);

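/*
 * Illustrative call (a sketch, not the real call site, which is the
 * low-level DSI/ISI exception path): a data write fault at 'ea'
 * would be retried as
 *
 *	rc = hash_page(ea, _PAGE_RW, 0x300);
 *
 * with rc == 1 sent on to do_page_fault() and a negative rc reported
 * via low_hash_fault().
 */
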
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;
	int ssize;

	BUG_ON(REGION_ID(ea) != USER_REGION_ID);

#ifdef CONFIG_PPC_MM_SLICES
	/* We only prefault standard pages for now */
	if (unlikely(get_slice_psize(mm, ea) != mm->context.user_psize))
		return;
#endif

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get Linux PTE if available */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

#ifdef CONFIG_PPC_64K_PAGES
	/* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
	 * a 64K kernel), then we don't preload, hash_page() will take
	 * care of it once we actually try to access the page.
	 * That way we don't have to duplicate all of the logic for segment
	 * page size demotion here
	 */
	if (pte_val(*ptep) & (_PAGE_4K_PFN | _PAGE_NO_CACHE))
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get VSID */
	ssize = user_segment_size(ea);
	vsid = get_vsid(mm->context.id, ea, ssize);

	/* Hash doesn't like irqs */
	local_irq_save(flags);

	/* Is that local to this CPU ? */
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;

	/* Hash it in */
#ifdef CONFIG_PPC_HAS_HASH_64K
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local, ssize);
	else
#endif /* CONFIG_PPC_HAS_HASH_64K */
		__hash_page_4K(ea, access, vsid, ptep, trap, local, ssize);

	local_irq_restore(flags);
}

/* WARNING: This is called from hash_low_64.S, if you change this prototype,
 *          do not forget to update the assembly call site !
 */
void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
		     int local)
{
	unsigned long hash, index, shift, hidx, slot;

	DBG_LOW("flush_hash_page(va=%016lx)\n", va);
	pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
		hash = hpt_hash(va, shift, ssize);
		hidx = __rpte_to_hidx(pte, index);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;
		DBG_LOW(" sub %ld: slot=%lx, hidx=%lx\n", index, slot, hidx);
		ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
	} pte_iterate_hashed_end();
}

void flush_hash_range(unsigned long number, int local)
{
	if (ppc_md.flush_hash_range)
		ppc_md.flush_hash_range(number, local);
	else {
		int i;
		struct ppc64_tlb_batch *batch =
			&__get_cpu_var(ppc64_tlb_batch);

		for (i = 0; i < number; i++)
			flush_hash_page(batch->vaddr[i], batch->pte[i],
					batch->psize, batch->ssize, local);
	}
}

/*
 * low_hash_fault is called when the low-level hash code failed
 * to insert a PTE due to a hypervisor error
 */
void low_hash_fault(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		siginfo_t info;

		info.si_signo = SIGBUS;
		info.si_errno = 0;
		info.si_code = BUS_ADRERR;
		info.si_addr = (void __user *)address;
		force_sig_info(SIGBUS, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hpteg;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
	int ret;

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
				 mode, HPTE_V_BOLTED,
				 mmu_linear_psize, mmu_kernel_ssize);
	BUG_ON(ret < 0);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
	linear_map_hash_slots[lmi] = ret | 0x80;
	spin_unlock(&linear_map_hash_lock);
}

static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
	unsigned long hash, hidx, slot;
	unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
	unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);

	hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
	spin_lock(&linear_map_hash_lock);
	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
	hidx = linear_map_hash_slots[lmi] & 0x7f;
	linear_map_hash_slots[lmi] = 0;
	spin_unlock(&linear_map_hash_lock);
	if (hidx & _PTEIDX_SECONDARY)
		hash = ~hash;
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot += hidx & _PTEIDX_GROUP_IX;
	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);
		else
			kernel_unmap_linear_page(vaddr, lmi);
	}
	local_irq_restore(flags);
}

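/*
 * Usage note: with CONFIG_DEBUG_PAGEALLOC the page allocator calls
 * kernel_map_pages() on every allocation and free, so stray accesses
 * to a freed page fault immediately instead of silently corrupting
 * it; only frames below linear_map_hash_count (those covered by the
 * bolted linear mapping tracked above) participate.
 */
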
#endif /* CONFIG_DEBUG_PAGEALLOC */