/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)

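/*
 * Low areas are the 256MB (1 << SID_SHIFT) segments below 4GB; high
 * areas are the larger (1 << HTLB_AREA_SHIFT) chunks above 4GB.  The
 * BUILD_BUG_ON()s below pin both NUM_LOW_AREAS and NUM_HIGH_AREAS at
 * 16, so the set of open areas of each kind fits in a u16 bitmask in
 * the mm context.
 */
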
/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, addr);
			/* A hugepage PTE is stored directly in the PMD entry */
			pt = (pte_t *)pm;
			BUG_ON(!pmd_none(*pm)
			       && !(pte_present(*pt) && pte_huge(*pt)));
			return pt;
		}
	}

	return NULL;
}

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
		pm = pmd_alloc(mm, pu, addr);
		if (pm) {
			pt = (pte_t *)pm;
			BUG_ON(!pmd_none(*pm)
			       && !(pte_present(*pt) && pte_huge(*pt)));
			return pt;
		}
	}

	return NULL;
}

#define HUGEPTE_BATCH_SIZE	(HPAGE_SIZE / PMD_SIZE)

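/*
 * A hugepage PTE lives at the PMD level, but one huge page spans
 * HPAGE_SIZE / PMD_SIZE consecutive PMD entries, so every update
 * below is replicated HUGEPTE_BATCH_SIZE times: a fault on any
 * address inside the huge page must find the same translation.
 */
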
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	int i;

	if (pte_present(*ptep)) {
		pte_clear(mm, addr, ptep);
		flush_tlb_pending();
	}

	for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) {
		*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
		ptep++;
	}
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);
	int i;

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, old, 0);

	for (i = 1; i < HUGEPTE_BATCH_SIZE; i++)
		ptep[i] = __pte(0);

	return __pte(old);
}

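/*
 * Neither batch update above is atomic as a whole.  That is safe
 * because callers perform hugepage PTE updates with
 * mm->page_table_lock held -- the same assumption hash_huge_page()
 * documents below for its non-atomic *ptep store.
 */
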
/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;

	if (! (within_hugepage_low_range(addr, len)
	       || within_hugepage_high_range(addr, len)) )
		return -EINVAL;

	return 0;
}

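/*
 * Converting a segment to hugepage use invalidates any SLB entries
 * that map it with the normal page size, and each CPU may hold such
 * entries.  These helpers run on every CPU (via on_each_cpu() in the
 * open_*_hpage_areas() functions below) and slbie each affected ESID.
 * SLBIE_C supplies the class bit of the entries being invalidated,
 * which must match for slbie to take effect -- assuming here the
 * convention on this tree that user segment entries are installed
 * with the class bit set.
 */
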
static void flush_low_segments(void *parm)
{
	u16 areas = (unsigned long) parm;
	unsigned long i;

	asm volatile("isync" : : : "memory");

	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);

	for (i = 0; i < NUM_LOW_AREAS; i++) {
		if (! (areas & (1U << i)))
			continue;
		asm volatile("slbie %0"
			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
	}

	asm volatile("isync" : : : "memory");
}

static void flush_high_segments(void *parm)
{
	u16 areas = (unsigned long) parm;
	unsigned long i, j;

	asm volatile("isync" : : : "memory");

	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);

	for (i = 0; i < NUM_HIGH_AREAS; i++) {
		if (! (areas & (1U << i)))
			continue;
		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
			asm volatile("slbie %0"
				     : : "r" (((i << HTLB_AREA_SHIFT)
					      + (j << SID_SHIFT)) | SLBIE_C));
	}

	asm volatile("isync" : : : "memory");
}

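/*
 * A high area covers 1UL << (HTLB_AREA_SHIFT - SID_SHIFT) separate
 * 256MB segments, each with its own SLB entry; hence the inner loop
 * above, where flush_low_segments() needs only one slbie per area.
 */
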
static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << SID_SHIFT;
	unsigned long end = (area+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_LOW_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}

static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << HTLB_AREA_SHIFT;
	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_HIGH_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}

static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

	newareas &= ~(mm->context.low_htlb_areas);
	if (! newareas)
		return 0; /* The segments we want are already open */

	for (i = 0; i < NUM_LOW_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_low_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.low_htlb_areas |= newareas;

	/* update the paca copy of the context struct */
	get_paca()->context = mm->context;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);

	return 0;
}

static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
		     != NUM_HIGH_AREAS);

	newareas &= ~(mm->context.high_htlb_areas);
	if (! newareas)
		return 0; /* The areas we want are already open */

	for (i = 0; i < NUM_HIGH_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_high_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.high_htlb_areas |= newareas;

	/* update the paca copy of the context struct */
	get_paca()->context = mm->context;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);

	return 0;
}

int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	int err;

	if ((addr + len) < addr)
		return -EINVAL;

	if ((addr + len) < 0x100000000UL)
		err = open_low_hpage_areas(current->mm,
					   LOW_ESID_MASK(addr, len));
	else
		err = open_high_hpage_areas(current->mm,
					    HTLB_AREA_MASK(addr, len));
	if (err) {
		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
		       addr, len,
		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
		return err;
	}

	return 0;
}

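/*
 * Worked example, assuming LOW_ESID_MASK(addr, len) returns one bit
 * per 256MB segment overlapped by [addr, addr+len): a 0x10000000-byte
 * range at 0x30000000 lies entirely within segment 3, so
 * prepare_hugepage_range(0x30000000, 0x10000000) ends up calling
 * open_low_hpage_areas(current->mm, 0x0008).
 */
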
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (! in_hugepage_area(mm->context, address))
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

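/*
 * pmd_huge() is always false and follow_huge_pmd() is unreachable on
 * ppc64: hugepages exist only inside the reserved low/high areas, so
 * generic code finds them through follow_huge_addr() above instead of
 * by noticing a huge PMD during a page table walk.
 */
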
/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}

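/*
 * The walk above is the generic bottom-up allocator plus the two
 * touches_hugepage_*_range() tests: any candidate window overlapping
 * an open hugepage area is skipped by rounding addr up to the next
 * segment (low) or area (high) boundary and rescanning from there.
 */
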
/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* dont allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or cant fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(mm, addr, len)) {
			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
			goto hugepage_recheck;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

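/*
 * Note the hugepage_recheck loop above: after stepping addr down and
 * below one reserved range, the new window may overlap another one,
 * so both touches_hugepage_*_range() tests are repeated until the
 * candidate is clear of all reserved regions.
 */
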
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
	unsigned long addr = 0x100000000UL;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= TASK_SIZE_USER64) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_high_range(addr, len, areamask)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on areamask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	int lastshift;
	u16 areamask, curareas;

	if (len & ~HPAGE_MASK)
		return -EINVAL;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -EINVAL;

	if (test_thread_flag(TIF_32BIT)) {
		curareas = current->mm->context.low_htlb_areas;

		/* First see if we can do the mapping in the existing
		 * low areas */
		addr = htlb_get_low_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Next, try opening new low areas, widening the
		 * candidate mask one area at a time */
		lastshift = 0;
		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; areamask >>= 1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	} else {
		curareas = current->mm->context.high_htlb_areas;

		/* First see if we can do the mapping in the existing
		 * high areas */
		addr = htlb_get_high_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		lastshift = 0;
		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
		     ! lastshift; areamask >>= 1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_high_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	}
	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
	       " enough areas\n");
	return -ENOMEM;
}

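/*
 * The widening loops start from the area mask of the highest address
 * where the mapping could fit and shift it right until bit 0 has been
 * tried.  For example, for a 256MB request in 32-bit mode,
 * LOW_ESID_MASK(0x100000000UL - len, len) is 0x8000, so the loop
 * offers htlb_get_low_area() the masks 0x8000, 0x4000, ... 0x0001 on
 * top of the already-open curareas, and only opens an area once a
 * placement in it is known to exist.
 */
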
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local)
{
	pte_t *ptep;
	unsigned long va, vpn;
	pte_t old_pte, new_pte;
	unsigned long rflags, prpn;
	long slot;
	int err = 1;

	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);
	vpn = va >> HPAGE_SHIFT;

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

/*	BUG_ON(pte_bad(*ptep)); */

	/*
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;

	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */

	old_pte = *ptep;
	new_pte = old_pte;

	rflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);

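	/*
	 * rflags holds the HPTE pp protection bits: 0x2 is user
	 * read/write, and or-ing in 1 when the Linux PTE lacks
	 * _PAGE_RW yields 0x3, user read-only (the usual hash-MMU pp
	 * encoding; see the case 2 comment above for why an existing
	 * HPTE may be more restrictive than the PTE).
	 */
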
	/* Check if pte already has an hpte (case 2) */
	if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(vpn, 1);
		if (pte_val(old_pte) & _PAGE_SECONDARY)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;

		/* If no matching HPTE is found, the update fails and
		 * we fall through to the insert path below */
		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
			pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, 1);
		unsigned long hpte_group;

		prpn = pte_pfn(old_pte);

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* Update the linux pte with the HPTE slot */
		pte_val(new_pte) &= ~_PAGE_HPTEFLAGS;
		pte_val(new_pte) |= _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		rflags |= _PAGE_COHERENT;

		slot = ppc_md.hpte_insert(hpte_group, va, prpn,
					  HPTE_V_LARGE, rflags);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			pte_val(new_pte) |= _PAGE_SECONDARY;
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, va, prpn,
						  HPTE_V_LARGE |
						  HPTE_V_SECONDARY,
						  rflags);
			if (slot == -1) {
				/* Both groups full: evict from a
				 * (pseudo)randomly chosen one and retry */
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP) & ~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		pte_val(new_pte) |= (slot<<12) & _PAGE_GROUP_IX;

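		/*
		 * (slot << 12) & _PAGE_GROUP_IX records which of the
		 * 8 slots inside the HPTE group now holds the entry;
		 * together with _PAGE_SECONDARY this lets the case 2
		 * path above locate the HPTE again without searching
		 * the group.
		 */
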
		/*
		 * No need to use ldarx/stdcx here because all who
		 * might be updating the pte will hold the
		 * page_table_lock
		 */
		*ptep = new_pte;
	}

	err = 0;

 out:
	spin_unlock(&mm->page_table_lock);

	return err;
}