/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

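/*
 * Walk (and, if necessary, allocate) the page table levels for a huge
 * mapping at addr: allocate the pud under the pgd, then the pmd under
 * the pud, and hand the pmd slot back cast to a pte_t *.  A huge page
 * PTE lives at the pmd level, so no pte page is allocated here.
 */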
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

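/*
 * Look up the pmd-level entry covering addr without allocating
 * anything; returns NULL if the pgd or pud level is not present.
 */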
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

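/*
 * Illustrative sketch (not in the original source): both addr and len
 * must be multiples of HPAGE_SIZE, which on i386 is 4 MB without PAE
 * and 2 MB with PAE.  With 4 MB huge pages:
 *
 *	is_aligned_hugepage_range(0x40000000, 0x00800000)  -> 0
 *	is_aligned_hugepage_range(0x40001000, 0x00800000)  -> -EINVAL
 */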
#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long start = address;
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct page *page;
	pte_t *pte;

	vma = find_vma(mm, start);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageCompound(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else
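/*
 * These are the versions actually built: follow_huge_addr() is not used
 * on this architecture, pmd_huge() tests the PSE (page size extension)
 * bit in the pmd, and follow_huge_pmd() returns the struct page for the
 * 4 KB subpage of the huge page that contains address.
 */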
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
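/*
 * Bottom-up search: starting at the cached free_area_cache (or at
 * TASK_UNMAPPED_BASE when the cached hole is too small), walk the vma
 * list upward looking for a HPAGE_SIZE-aligned gap of at least len
 * bytes, remembering the largest hole seen along the way.
 */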
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

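/*
 * Top-down search: start just below mm->mmap_base and walk downward,
 * trying to slide a HPAGE_SIZE-aligned region of len bytes in below
 * each existing vma.  If nothing fits, retry once from the base and
 * finally fall back to the bottom-up allocator.
 */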
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

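/*
 * Entry point used when placing a hugetlb mapping: reject unaligned
 * lengths, honour a suitably aligned address hint when that range is
 * free, and otherwise pick the bottom-up or top-down allocator to
 * match the mm's normal mmap layout.
 */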
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
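/*
 * Illustrative userspace sketch (not part of this file, assumes a
 * hugetlbfs mount at /mnt/huge and pages reserved via
 * /proc/sys/vm/nr_hugepages): mapping a hugetlbfs file is what ends up
 * invoking hugetlb_get_unmapped_area() above to place the mapping.
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 4 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	if (p == MAP_FAILED)
 *		perror("mmap");
 */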