/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)

#ifdef CONFIG_PPC_64K_PAGES
#define HUGEPTE_INDEX_SIZE	(PMD_SHIFT - HPAGE_SHIFT)
#else
#define HUGEPTE_INDEX_SIZE	(PUD_SHIFT - HPAGE_SHIFT)
#endif
#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << HUGEPTE_INDEX_SIZE)

#define HUGEPD_SHIFT		(HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
#define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
#define HUGEPD_MASK		(~(HUGEPD_SIZE-1))

#define huge_pgtable_cache	(pgtable_cache[HUGEPTE_CACHE_NUM])
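
/* Worked example (illustrative numbers only, assuming the classic
 * layout where HPAGE_SHIFT is 24 for 16MB pages and PUD_SHIFT is 28):
 * HUGEPTE_INDEX_SIZE is 4, so a hugepte table holds 16 entries and a
 * hugepd spans 1UL << 28 = 256MB, i.e. exactly one segment. */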
/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */
#define HUGEPD_OK	0x1

typedef struct { unsigned long pd; } hugepd_t;

#define hugepd_none(hpd)	((hpd).pd == 0)

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!(hpd.pd & HUGEPD_OK));
	return (pte_t *)(hpd.pd & ~HUGEPD_OK);
}
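
/* The tag bit is safe to use because hugepte tables come from
 * huge_pgtable_cache, which hugetlbpage_init() below creates with
 * HUGEPTE_TABLE_SIZE alignment, so the low bit of a table pointer
 * is always clear. */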
static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr)
{
	unsigned long idx = ((addr >> HPAGE_SHIFT) & (PTRS_PER_HUGEPTE-1));
	pte_t *dir = hugepd_page(*hpdp);

	return dir + idx;
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address)
{
	pte_t *new = kmem_cache_alloc(huge_pgtable_cache,
				      GFP_KERNEL|__GFP_REPEAT);

	if (! new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (!hugepd_none(*hpdp))
		kmem_cache_free(huge_pgtable_cache, new);
	else
		hpdp->pd = (unsigned long)new | HUGEPD_OK;
	spin_unlock(&mm->page_table_lock);
	return 0;
}
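
/* If two callers race to fill the same hugepd, both allocate a table;
 * the loser finds *hpdp already populated under page_table_lock, frees
 * its copy, and both continue with the winner's table. */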
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
#ifdef CONFIG_PPC_64K_PAGES
			pmd_t *pm;
			pm = pmd_offset(pu, addr);
			if (!pmd_none(*pm))
				return hugepte_offset((hugepd_t *)pm, addr);
#else
			return hugepte_offset((hugepd_t *)pu, addr);
#endif
		}
	}

	return NULL;
}
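
/* Note the asymmetry above: with 64K base pages a hugepd replaces a
 * PMD entry, with 4K base pages it replaces a PUD entry.  Either way
 * a given subtree is entirely normal or entirely huge, which the
 * in_hugepage_area() checks rely on. */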
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	hugepd_t *hpdp = NULL;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
#ifdef CONFIG_PPC_64K_PAGES
		pmd_t *pm;
		pm = pmd_alloc(mm, pu, addr);
		if (pm)
			hpdp = (hugepd_t *)pm;
#else
		hpdp = (hugepd_t *)pu;
#endif
	}

	if (! hpdp)
		return NULL;

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr))
		return NULL;

	return hugepte_offset(hpdp, addr);
}
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
{
	pte_t *hugepte = hugepd_page(*hpdp);

	hpdp->pd = 0;
	tlb->need_flush = 1;
	pgtable_free_tlb(tlb, pgtable_free_cache(hugepte, HUGEPTE_CACHE_NUM,
						 PGF_CACHENUM_MASK));
}
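
/* The table is handed to pgtable_free_tlb() rather than freed
 * directly, so it stays allocated until the mmu_gather is flushed and
 * lockless walkers of the page tables (such as the hash fault path)
 * cannot see it vanish underneath them. */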
#ifdef CONFIG_PPC_64K_PAGES
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}
#endif
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
#ifdef CONFIG_PPC_64K_PAGES
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
#else
		if (pud_none(*pud))
			continue;
		free_hugepte_range(tlb, (hugepd_t *)pud);
#endif
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}
/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * Comments below taken from the normal free_pgd_range().  They
	 * apply here too.  The tests against HUGEPD_MASK below are
	 * essential, because we *don't* test for this at the bottom
	 * level.  Without them we'll attempt to free a hugepte table
	 * when we unmap just part of it, even if there are other
	 * active mappings using it.
	 *
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing HUGEPD* at this top level?  Because
	 * often there will be no work to do at all, and we'd prefer
	 * not to go all the way down to the bottom just to discover
	 * that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we
	 * must be careful to reject "the opposite 0" before it
	 * confuses the subsequent tests.  But what about where end is
	 * brought down by HUGEPD_SIZE below?  No, end can't go down
	 * to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */
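
	/*
	 * Worked example (illustrative numbers only, assuming a 256MB
	 * HUGEPD_SIZE): unmapping [0x34000000, 0x38000000) while a
	 * neighbouring vma still covers [0x30000000, 0x34000000)
	 * arrives here with floor = 0x34000000.  Rounding addr down
	 * with HUGEPD_MASK yields 0x30000000 < floor, so addr is
	 * bumped up to 0x40000000, which is beyond end - 1, and we
	 * correctly free nothing: the neighbour still uses the
	 * hugepte table.
	 */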
	addr &= HUGEPD_MASK;
	if (addr < floor) {
		addr += HUGEPD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= HUGEPD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= HUGEPD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		BUG_ON(! in_hugepage_area((*tlb)->mm->context, addr));
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the
		 * right argument to hpte_update (huge / !huge)
		 */
		unsigned long old = pte_update(ptep, ~0UL);
		if (old & _PAGE_HASHPTE)
			hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
		flush_tlb_pending();
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
	*ptep = __pte(0);

	return __pte(old);
}
struct slb_flush_info {
	struct mm_struct *mm;
	u16 newareas;
};

static void flush_low_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */
	if (current->active_mm != fi->mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_LOW_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		asm volatile("slbie %0"
			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}
static void flush_high_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i, j;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */
	if (current->active_mm != fi->mm)
		return;

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_HIGH_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
			asm volatile("slbie %0"
				     : : "r" (((i << HTLB_AREA_SHIFT)
					       + (j << SID_SHIFT)) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}
static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << SID_SHIFT;
	unsigned long end = (area+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_LOW_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}
static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << HTLB_AREA_SHIFT;
	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_HIGH_AREAS);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = 0x100000000UL;

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}
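
/* open_low_hpage_areas()/open_high_hpage_areas() convert a set of
 * areas to hugepage use: check each requested area is empty, mark it
 * in the context, then run the matching flush function on every CPU
 * so stale SLB entries for those segments are dropped before any
 * hugepage access can occur. */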
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;
	struct slb_flush_info fi;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

	newareas &= ~(mm->context.low_htlb_areas);
	if (! newareas)
		return 0; /* The segments we want are already open */

	for (i = 0; i < NUM_LOW_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_low_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.low_htlb_areas |= newareas;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_low_segments, &fi, 0, 1);

	return 0;
}
static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	struct slb_flush_info fi;
	unsigned long i;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
		     != NUM_HIGH_AREAS);

	newareas &= ~(mm->context.high_htlb_areas);
	if (! newareas)
		return 0; /* The areas we want are already open */

	for (i = 0; i < NUM_HIGH_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_high_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.high_htlb_areas |= newareas;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_high_segments, &fi, 0, 1);

	return 0;
}
int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
{
	int err = 0;

	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;

	if (addr < 0x100000000UL)
		err = open_low_hpage_areas(current->mm,
					   LOW_ESID_MASK(addr, len));
	if ((addr + len) > 0x100000000UL)
		err = open_high_hpage_areas(current->mm,
					    HTLB_AREA_MASK(addr, len));
	if (err) {
		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
		       addr, len,
		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
		return err;
	}

	return 0;
}
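
/* Illustrative example (assuming LOW_ESID_MASK() sets one bit per
 * 256MB segment overlapped by [addr, addr+len)): a request for 512MB
 * of hugepages at 0x30000000 spans segments 3 and 4, so
 * open_low_hpage_areas() is asked to open mask 0x0018. */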
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (! in_hugepage_area(mm->context, address))
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}
/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}
/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len,
			       const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(mm, addr, len)) {
			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
			goto hugepage_recheck;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	if (TASK_SIZE - len >= addr &&
	    (!vma || ((addr + len) <= vma->vm_start)))
		return 0;

	return -ENOMEM;
}
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}
static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
	unsigned long addr = 0x100000000UL;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= TASK_SIZE_USER64) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_high_range(addr, len, areamask)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on areamask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	int lastshift;
	u16 areamask, curareas;

	if (HPAGE_SHIFT == 0)
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -EINVAL;

	/* Paranoia, caller should have dealt with this */
	BUG_ON((addr + len) < addr);

	if (test_thread_flag(TIF_32BIT)) {
		curareas = current->mm->context.low_htlb_areas;

		/* First see if we can use the hint address */
		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = LOW_ESID_MASK(addr, len);
			if (open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing low areas */
		addr = htlb_get_low_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; areamask >>= 1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	} else {
		curareas = current->mm->context.high_htlb_areas;

		/* First see if we can use the hint address */
		/* We discourage 64-bit processes from doing hugepage
		 * mappings below 4GB (must use MAP_FIXED) */
		if ((addr >= 0x100000000UL)
		    && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = HTLB_AREA_MASK(addr, len);
			if (open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing high areas */
		addr = htlb_get_high_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
		     ! lastshift; areamask >>= 1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_high_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	}
	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
	       " enough areas\n");
	return -ENOMEM;
}
/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else
			rflags |= HPTE_R_N;
	}
	return rflags;
}
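
/* PG_arch_1 above acts as an "icache clean" flag: on an instruction
 * fault (trap 0x400) every base-size subpage of the huge page is
 * flushed and the flag is set; on a data fault the mapping is made
 * no-execute instead, so a later ifetch will fault and flush then. */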
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY |
			_PAGE_ACCESSED | _PAGE_HASHPTE;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					  old_pte, new_pte));
	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);
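
	/* rflags now holds the HPTE pp protection bits: 0x2 (pp=10)
	 * allows user read/write, while the low bit set (pp=11)
	 * degrades the mapping to read-only when _PAGE_RW is clear;
	 * HPTE_R_N adds no-execute on top. */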
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}
	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		/* --BenH: I think they are ... */
		rflags |= _PAGE_COHERENT;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
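
		/* hpte_insert() returns the slot index within the
		 * group, with an extra bit flagging the secondary
		 * hash; shifted into _PAGE_F_GIX/_PAGE_F_SECOND it
		 * lets the case 2 path above relocate this HPTE for
		 * updates without searching the whole group. */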
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
	memset(addr, 0, kmem_cache_size(cache));
}

static int __init hugetlbpage_init(void)
{
	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -ENODEV;

	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
					       HUGEPTE_TABLE_SIZE,
					       HUGEPTE_TABLE_SIZE,
					       SLAB_HWCACHE_ALIGN |
					       SLAB_MUST_HWCACHE_ALIGN,
					       zero_ctor, NULL);
	if (! huge_pgtable_cache)
		panic("hugetlbpage_init(): could not create hugepte cache\n");

	return 0;
}

module_init(hugetlbpage_init);