/*
 * arch/sh64/mm/hugetlbpage.c
 *
 * SuperH HugeTLB page support.
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

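/*
 * Allocate the page-table levels down to the PTE for a huge-page address.
 * On sh64 a huge page is represented by a run of (1 << HUGETLB_PAGE_ORDER)
 * ordinary PTEs, so the pointer returned is to the first PTE of that run.
 */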
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pmd = pmd_alloc(mm, pgd, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, pmd, addr);
        }
        return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd) {
                pmd = pmd_offset(pgd, addr);
                if (pmd)
                        pte = pte_offset_map(pmd, addr);
        }
        return pte;
}

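/*
 * mk_pte_huge() tags a PTE as part of a huge mapping via _PAGE_SZHUGE.
 * set_huge_pte() then instantiates the mapping by writing the whole run of
 * contiguous PTEs, stepping the physical address by PAGE_SIZE per entry,
 * and bumps the rss counter by the number of base pages in a huge page.
 */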
#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)

static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page *page, pte_t *page_table, int write_access)
{
        unsigned long i;
        pte_t entry;

        add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);

        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
                                                       vma->vm_page_prot)));
        else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte(page_table, entry);
                page_table++;

                pte_val(entry) += PAGE_SIZE;
        }
}

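/*
 * Read back the first PTE of the huge mapping and clear the whole run.
 * mm and addr are needed for the three-argument pte_clear() used
 * throughout this file; addr advances by PAGE_SIZE per cleared entry.
 */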
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;
        int i;

        entry = *ptep;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
                addr += PAGE_SIZE;
                ptep++;
        }

        return entry;
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        return 0;
}

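/*
 * Fork-time copy of a hugetlb VMA: for each huge page, take a single
 * reference on the (compound) page and replicate the source PTE run into
 * the destination, advancing the physical address by PAGE_SIZE per entry.
 */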
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
        int i;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                BUG_ON(!src_pte || pte_none(*src_pte));
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        set_pte(dst_pte, entry);
                        pte_val(entry) += PAGE_SIZE;
                        dst_pte++;
                }
                add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;

nomem:
        return -ENOMEM;
}

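/*
 * get_user_pages() back end for hugetlb VMAs. Mappings are prefaulted at
 * mmap() time (see hugetlb_prefault() below), so every PTE is expected to
 * be present; each base page of a huge page counts against *length.
 */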
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i)
{
        unsigned long vaddr = *position;
        int remainder = *length;

        WARN_ON(!is_vm_hugetlb_page(vma));

        while (vaddr < vma->vm_end && remainder) {
                if (pages) {
                        pte_t *pte;
                        struct page *page;

                        pte = huge_pte_offset(mm, vaddr);

                        /* hugetlb should be locked, and hence, prefaulted */
                        BUG_ON(!pte || pte_none(*pte));

                        page = pte_page(*pte);

                        WARN_ON(!PageCompound(page));

                        get_page(page);
                        pages[i] = page;
                }

                if (vmas)
                        vmas[i] = vma;

                vaddr += PAGE_SIZE;
                --remainder;
                ++i;
        }

        *length = remainder;
        *position = vaddr;

        return i;
}

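/*
 * sh64 does not map huge pages at the PMD level and has no special
 * follow_page() path for them, so the three hooks below are stubs.
 */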
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

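/*
 * Tear down a huge-page range: drop the page reference taken when the
 * mapping was instantiated, clear every PTE in the run, adjust rss by
 * the number of base pages unmapped, and flush the TLB for the range.
 */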
void unmap_hugepage_range(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;
        int i;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);
                BUG_ON(!pte);
                if (pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                put_page(page);
                for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                        pte_clear(mm, address+(i*PAGE_SIZE), pte);
                        pte++;
                }
        }
        add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
        flush_tlb_range(vma, start, end);
}

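/*
 * Prefault an entire hugetlb mapping at mmap() time under
 * mm->page_table_lock: look each huge page up in the page cache,
 * allocating (and charging quota for) pages that are not yet present,
 * then write the PTE run. GFP_ATOMIC is used for add_to_page_cache()
 * because the page table lock is held.
 */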
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret) {
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
                                free_huge_page(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}