/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
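/*
 * On i386, a huge page is mapped by a single large (PSE) entry at the
 * pmd level, so no pte page is ever allocated; the pmd entry itself is
 * handed back cast to a pte_t *.
 */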
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)
		pte = (pte_t *) pmd_alloc(mm, pud, addr);
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
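/*
 * Non-allocating page-table walk: returns the pmd entry mapping addr
 * (as a pte_t *), or NULL if the pgd or pud level is not present.
 */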
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud))
			pmd = pmd_offset(pud, addr);
	}
	return (pte_t *) pmd;
}
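/*
 * Two variants of the follow_huge_*() helpers follow: the #if 0 branch
 * is a compiled-out test version, the #else branch is what actually
 * gets built.
 */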
#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long start = address;
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	pte_t *pte;
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* index the 4K subpage within the compound huge page */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageCompound(page));

	return page;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}
int pmd_huge(pmd_t pmd)
{
	/* a pmd maps a huge page iff the PSE (page size extension) bit is set */
	return !!(pmd_val(pmd) & _PAGE_PSE);
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif
/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
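/*
 * Bottom-up search for a free, HPAGE_SIZE-aligned range: start at the
 * cached free_area_cache hint (or TASK_UNMAPPED_BASE when the cached
 * hole is known to be too small) and walk the vma list upwards.
 */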
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}
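/*
 * Top-down variant: start just below free_area_cache and walk down
 * through the gaps between vmas, retrying once from the mmap base and
 * finally falling back to the bottom-up allocator if nothing fits.
 */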
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
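/*
 * Main entry point: validate size and alignment, honour an explicit
 * address hint when the hole there is free, then dispatch to the
 * bottom-up or top-down helper to match this mm's mmap layout.
 */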
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/