arch/i386/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

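/*
 * A huge page on i386 is mapped by a single pmd entry, so the "pte"
 * handed back here is really a pmd slot: pud_alloc()/pmd_alloc() walk
 * (and, if needed, allocate) the upper levels, and the pmd pointer is
 * cast to pte_t *.  The BUG_ON() below catches a slot that is already
 * populated by a normal (non-huge) mapping.
 */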
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud)
                pte = (pte_t *) pmd_alloc(mm, pud, addr);
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}

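/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the page tables
 * without allocating.  Returns the pmd entry cast to pte_t *, or NULL
 * if an intermediate level is not present.
 */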
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud))
                        pmd = pmd_offset(pud, addr);
        }
        return (pte_t *) pmd;
}

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* index of the 4 KB subpage within the compound huge page */
        page = &pte_page(*pte)[(address & ~HPAGE_MASK) >> PAGE_SHIFT];

        WARN_ON(!PageCompound(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

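/*
 * _PAGE_PSE is the Page Size Extension bit: set in a pmd it marks a
 * large (4 MB, or 2 MB with PAE) mapping rather than a pointer to a
 * page table.  The double negation folds the flag to 0 or 1.
 */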
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

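/*
 * The pmd maps a whole huge page; pte_page() yields its head page and
 * the low bits of the address pick the 4 KB subpage.  Worked example,
 * assuming 4 MB huge pages (HPAGE_SHIFT == 22): address 0x40123456 in
 * a huge page based at 0x40000000 gives
 *     (address & ~HPAGE_MASK) >> PAGE_SHIFT == 0x123456 >> 12 == 0x123,
 * i.e. subpage 0x123 of the compound page.
 */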
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
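/*
 * Bottom-up first fit: starting from the cached free-area hint (or
 * TASK_UNMAPPED_BASE), walk the vma list looking for a gap of at least
 * len bytes at a HPAGE_SIZE-aligned address.  cached_hole_size tracks
 * the largest hole seen so a too-small hint is skipped cheaply; one
 * restart from TASK_UNMAPPED_BASE guards against missed holes.
 */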
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}

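/*
 * Top-down variant: search downward from mmap_base for a HPAGE_SIZE-
 * aligned gap, retrying once from the base if the cached hint leaves
 * no room, and finally falling back to the bottom-up search (which
 * can still succeed with large stack limits and large mappings).
 */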
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & HPAGE_MASK;
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                            (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        return (mm->free_area_cache = addr);
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & HPAGE_MASK;
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

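/*
 * Entry point for mmap() on a hugetlbfs file.  A minimal userspace
 * sketch of how a request reaches this function (the /mnt/huge mount
 * point and 4 MB page size are assumed for the example, not taken
 * from this file):
 *
 *	size_t hpage = 4UL << 20;	 assumed 4 MB huge page
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 4 * hpage, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * len must be a multiple of HPAGE_SIZE or the -EINVAL check below
 * fires; an explicit addr hint is honoured only if the aligned range
 * is free.
 */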
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/