[linux-2.6] / mm / mremap.c
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@redhat.com>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
		return NULL;

	return pmd;
}
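
/*
 * Both helpers above walk the page-table tree top-down:
 * pgd -> pud -> pmd.  get_old_pmd() only reads: a missing or bad
 * entry at any level means there is nothing to move.  alloc_new_pmd()
 * populates missing levels on the way down, including a pte page
 * below the pmd.  On architectures with fewer paging levels the pud
 * (and possibly pmd) is folded away at compile time, so the extra
 * steps cost nothing there.
 */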

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock vmtruncate out,
		 * since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		if (new_vma->vm_truncate_count &&
		    new_vma->vm_truncate_count != vma->vm_truncate_count)
			new_vma->vm_truncate_count = 0;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map_nested(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_clear_flush(vma, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}
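
	/*
	 * The loop leaves old_pte and new_pte pointing one entry past
	 * the last slot visited, hence the "- 1" in the unmaps below.
	 */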

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap_nested(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);
}

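/*
 * LATENCY_LIMIT caps how much a single move_ptes() call shifts at a
 * time, so the cond_resched() at the top of move_page_tables()'s loop
 * gets to run at least once per 64 pages' worth of ptes moved.
 */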
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

static unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		if (next - 1 > old_end)
			next = old_end;
		extent = next - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
		if (!new_pmd)
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
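		/*
		 * At this point extent is the largest stretch that
		 * stays inside a single pmd on both the old and new
		 * side, capped by LATENCY_LIMIT.  E.g. with 2MB pmds
		 * and 4K pages, old_addr 0x1ff000 clamps extent to
		 * 0x1000: the old side hits its pmd boundary after one
		 * page, whatever the new side would allow.
		 */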
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
	}

	return len + old_addr - old_end;	/* how much done */
}

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(),
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from the new area to the
		 * old, which will succeed since the page tables are
		 * still there, and then proceed to unmap the new area
		 * instead of the old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}
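
	/*
	 * excess is the size, in bytes, of whatever part of the vma
	 * lies outside the moved range and must keep its VM_ACCOUNT
	 * charge; split records whether that remainder sits on both
	 * sides, i.e. whether do_munmap() will leave two vmas behind.
	 */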

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			make_pages_present(new_addr + old_len,
					   new_addr + new_len);
	}

	return new_addr;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	/* new_addr is only valid if MREMAP_FIXED is specified */
	if (flags & MREMAP_FIXED) {
		if (new_addr & ~PAGE_MASK)
			goto out;
		if (!(flags & MREMAP_MAYMOVE))
			goto out;

		if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
			goto out;

		/* Check if the location we're moving into overlaps the
		 * old location at all, and fail if it does.
		 */
		if ((new_addr <= addr) && (new_addr+new_len) > addr)
			goto out;

		if ((addr <= new_addr) && (addr+old_len) > new_addr)
			goto out;
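
		/*
		 * Together the two tests above reject any overlap of
		 * [new_addr, new_addr+new_len) with [addr, addr+old_len).
		 * E.g. with addr 0x10000 and old_len 0x4000, a new_addr
		 * of 0xe000 (new_len 0x4000) fails the first test and a
		 * new_addr of 0x12000 fails the second.
		 */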

		ret = do_munmap(mm, new_addr, new_len);
		if (ret)
			goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
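	/*
	 * Note: when old_len == new_len the do_munmap() below is a
	 * zero-length call, which fails with -EINVAL; the
	 * "old_len != new_len" test deliberately ignores that
	 * expected failure.
	 */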
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		if (!(flags & MREMAP_FIXED) || (new_addr == addr))
			goto out;
		old_len = new_len;
	}

	/*
	 * Ok, we need to grow.. or relocate.
	 */
	ret = -EFAULT;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (is_vm_hugetlb_page(vma)) {
		ret = -EINVAL;
		goto out;
	}
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
		if (new_len > old_len)
			goto out;
	}
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto out;
	}
	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (vma->vm_flags & VM_ACCOUNT) {
		charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto out_nc;
	}

	/* old_len extends exactly to the end of the area,
	 * and we're not relocating the area.
	 */
	if (old_len == vma->vm_end - addr &&
	    !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			vma_adjust(vma, vma->vm_start,
				addr + new_len, vma->vm_pgoff, NULL);

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				make_pages_present(addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		if (!(flags & MREMAP_FIXED)) {
			unsigned long map_flags = 0;
			if (vma->vm_flags & VM_MAYSHARE)
				map_flags |= MAP_SHARED;

			new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
						vma->vm_pgoff, map_flags);
			ret = new_addr;
			if (new_addr & ~PAGE_MASK)
				goto out;
		}
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
out_nc:
	return ret;
}

asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
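
As a usage note, here is a minimal userspace sketch (not part of this file)
of the syscall path above: it grows an anonymous mapping with
MREMAP_MAYMOVE, so the kernel either takes the in-place vma_adjust() fast
path in do_mremap() or falls back to move_vma()/move_page_tables(). Error
handling is kept to the bare minimum.

#define _GNU_SOURCE		/* for mremap() and MREMAP_MAYMOVE */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 8 * 4096;
	char *p, *q;

	/* Anonymous, writable mapping; the kernel picks the address. */
	p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "hello");

	/*
	 * Grow the mapping.  Without MREMAP_MAYMOVE this fails with
	 * ENOMEM when the pages after p are already in use; with it,
	 * the kernel may return a different address after moving the
	 * page tables, which is exactly the move_vma() path above.
	 */
	q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("%p -> %p, contents still \"%s\"\n", (void *)p, (void *)q, q);

	munmap(q, new_len);
	return 0;
}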