/*
 * arch/i386/mm/hugetlbpage.c
 *
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

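/*
 * On i386 a huge page is mapped by a single pmd entry, so the "huge pte"
 * is really the pmd slot for @addr.  Walk the pgd, allocating the pud and
 * pmd levels as needed, and hand back the pmd slot cast to a pte_t pointer.
 * Any existing entry must be either empty or already a huge pte.
 */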
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud)
                pte = (pte_t *) pmd_alloc(mm, pud, addr);
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}

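/*
 * Lookup-only counterpart of huge_pte_alloc(): return the pmd slot for
 * @addr, or NULL if the pgd or pud level is not present.
 */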
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud))
                        pmd = pmd_offset(pud, addr);
        }
        return (pte_t *) pmd;
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
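/*
 * Both addr and len must be multiples of HPAGE_SIZE: 2MB on i386 with PAE
 * enabled, 4MB otherwise.  For example, addr 0x40200000 with len 0x600000
 * is acceptable with 2MB huge pages but rejected with 4MB huge pages.
 */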
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        unsigned long vpfn = address / PAGE_SIZE;
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* offset of the 4K subpage within the huge page */
        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

        WARN_ON(!PageCompound(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

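/*
 * A pmd maps a huge page when the PSE (page size extension) bit is set,
 * i.e. the entry points directly at a 2MB/4MB physical region rather
 * than at a page table.
 */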
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

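/*
 * Return the struct page of the 4K subpage of the huge page containing
 * @address: take the head page referenced by the pmd and offset into the
 * compound page by the low bits of the address.
 */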
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
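/*
 * Bottom-up search for a free, HPAGE_SIZE-aligned range: start at the
 * cached free_area_cache (or TASK_UNMAPPED_BASE) and walk the vma list
 * upwards until a hole of at least @len bytes is found, recording the
 * largest hole passed over in cached_hole_size so that smaller future
 * requests restart from the bottom.  This mirrors the generic
 * arch_get_unmapped_area(), but with huge-page alignment.
 */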
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}

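/*
 * Top-down variant: search downwards from mm->mmap_base for an
 * HPAGE_SIZE-aligned hole.  If the cached state leaves no room, retry
 * once from the base; if that also fails, fall back to the bottom-up
 * search above.
 */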
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & HPAGE_MASK;
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                            (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        return (mm->free_area_cache = addr);
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & HPAGE_MASK;
        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

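/*
 * Arch hook typically reached via hugetlbfs' ->get_unmapped_area: reject
 * lengths that are not HPAGE_SIZE-aligned, honour an explicit hint if the
 * aligned range is free, and otherwise pick the bottom-up or top-down
 * search to match the mm's mmap layout (legacy layouts use
 * arch_get_unmapped_area).
 */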
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/