/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007      Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/tracehook.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "internal.h"

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int heap_stack_gap = 0;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of shareable VMAs */
struct rb_root nommu_vma_tree = RB_ROOT;
DECLARE_RWSEM(nommu_vma_sem);

struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
        struct address_space *mapping = inode->i_mapping;
        unsigned long limit;

        if (inode->i_size < offset)
                goto do_expand;
        i_size_write(inode, offset);

        truncate_inode_pages(mapping, offset);
        goto out_truncate;

do_expand:
        limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
        if (limit != RLIM_INFINITY && offset > limit)
                goto out_sig;
        if (offset > inode->i_sb->s_maxbytes)
                goto out;
        i_size_write(inode, offset);

out_truncate:
        if (inode->i_op && inode->i_op->truncate)
                inode->i_op->truncate(inode);
        return 0;
out_sig:
        send_sig(SIGXFSZ, current, 0);
out:
        return -EFBIG;
}
EXPORT_SYMBOL(vmtruncate);

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
        struct page *page;

        /*
         * If the object we have should not have ksize performed on it,
         * return size of 0
         */
        if (!objp || !virt_addr_valid(objp))
                return 0;

        page = virt_to_head_page(objp);

        /*
         * If the allocator sets PageSlab, we know the pointer came from
         * kmalloc().
         */
        if (PageSlab(page))
                return ksize(objp);

        /*
         * The ksize() function is only guaranteed to work for pointers
         * returned by kmalloc(). So handle arbitrary pointers here.
         */
        return PAGE_SIZE << compound_order(page);
}

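/*
 * get at the pages mapped across an address range in the given task's address
 * space
 * - under NOMMU conditions there is no paging, so each page is assumed to be
 *   present: we just look up the VMA covering each address, check that the
 *   requested access is permitted and take a reference on the page it maps
 */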
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                     unsigned long start, int len, int flags,
                     struct page **pages, struct vm_area_struct **vmas)
{
        struct vm_area_struct *vma;
        unsigned long vm_flags;
        int i;
        int write = !!(flags & GUP_FLAGS_WRITE);
        int force = !!(flags & GUP_FLAGS_FORCE);
        int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);

        /* calculate required read or write permissions.
         * - if 'force' is set, we only require the "MAY" flags.
         */
        vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
        vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

        for (i = 0; i < len; i++) {
                vma = find_vma(mm, start);
                if (!vma)
                        goto finish_or_fault;

                /* protect what we can, including chardevs */
                if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
                    (!ignore && !(vm_flags & vma->vm_flags)))
                        goto finish_or_fault;

                if (pages) {
                        pages[i] = virt_to_page(start);
                        if (pages[i])
                                page_cache_get(pages[i]);
                }
                if (vmas)
                        vmas[i] = vma;
                start += PAGE_SIZE;
        }

        return i;

finish_or_fault:
        return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        unsigned long start, int len, int write, int force,
        struct page **pages, struct vm_area_struct **vmas)
{
        int flags = 0;

        if (write)
                flags |= GUP_FLAGS_WRITE;
        if (force)
                flags |= GUP_FLAGS_FORCE;

        return __get_user_pages(tsk, mm,
                                start, len, flags,
                                pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

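/*
 * free memory returned by vmalloc()
 * - under NOMMU conditions, vmalloc() is implemented on top of kmalloc(), so
 *   releasing the block is a plain kfree()
 */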
void vfree(const void *addr)
{
        kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
        /*
         * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
         * returns only a logical address.
         */
        return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

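/**
 * vmalloc_user - allocate zeroed memory for userspace mapping
 *      @size:          allocation size
 *
 * Allocate zeroed memory suitable for mapping into userspace, and mark the
 * covering VMA with VM_USERMAP so that a later remap_vmalloc_range() on the
 * region will be permitted.
 */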
void *vmalloc_user(unsigned long size)
{
        void *ret;

        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL);
        if (ret) {
                struct vm_area_struct *vma;

                down_write(&current->mm->mmap_sem);
                vma = find_vma(current->mm, (unsigned long)ret);
                if (vma)
                        vma->vm_flags |= VM_USERMAP;
                up_write(&current->mm->mmap_sem);
        }

        return ret;
}
EXPORT_SYMBOL(vmalloc_user);

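/*
 * under NOMMU conditions a vmalloc() area is physically contiguous, so
 * translating an address within it to a struct page or a PFN is a simple
 * virt_to_page() lookup
 */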
struct page *vmalloc_to_page(const void *addr)
{
        return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
        return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

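/*
 * read from or write to a vmalloc() area
 * - with no separate virtual mappings to walk, both operations reduce to a
 *   straight memcpy()
 */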
long vread(char *buf, char *addr, unsigned long count)
{
        memcpy(buf, addr, count);
        return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        memcpy(addr, buf, count);
        return count;
}

/**
 *      vmalloc  -  allocate virtually contiguous memory
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

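/**
 *      vmalloc_node  -  allocate memory on a specific node
 *      @size:          allocation size
 *      @node:          NUMA node
 *
 *      The node hint is ignored: this simply falls through to vmalloc().
 */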
void *vmalloc_node(unsigned long size, int node)
{
        return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *      vmalloc_exec  -  allocate virtually contiguous, executable memory
 *      @size:          allocation size
 *
 *      Kernel-internal function to allocate enough pages to cover @size
 *      from the page level allocator and map them into contiguous and
 *      executable kernel virtual space.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *      @size:          allocation size
 *
 *      Allocate enough 32bit PA addressable pages to cover @size from the
 *      page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *      @size:          allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
        /*
         * We'll have to sort out the ZONE_DMA bits for 64-bit,
         * but for now this can simply use vmalloc_user() directly.
         */
        return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

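/*
 * vmap() and vunmap() cannot be implemented without an MMU: there is no way
 * to give an arbitrary array of pages a contiguous virtual address, so any
 * caller reaching these is a bug
 */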
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
        BUG();
        return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
        BUG();
}
EXPORT_SYMBOL(vunmap);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

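/*
 * there are no page tables into which a page could be inserted, so
 * vm_insert_page() cannot be supported
 */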
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
                   struct page *page)
{
        return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
        struct mm_struct *mm = current->mm;

        if (brk < mm->start_brk || brk > mm->context.end_brk)
                return mm->brk;

        if (mm->brk == brk)
                return mm->brk;

        /*
         * Always allow shrinking brk
         */
        if (brk <= mm->brk) {
                mm->brk = brk;
                return brk;
        }

        /*
         * Ok, looks good - let it rip.
         */
        return mm->brk = brk;
}

#ifdef DEBUG
static void show_process_blocks(void)
{
        struct vm_list_struct *vml;

        printk("Process blocks %d:", current->pid);

        for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
                printk(" %p: %p", vml, vml->vma);
                if (vml->vma)
                        printk(" (%d @%lx #%d)",
                               kobjsize((void *) vml->vma->vm_start),
                               vml->vma->vm_start,
                               atomic_read(&vml->vma->vm_usage));
                printk(vml->next ? " ->" : ".\n");
        }
}
#endif /* DEBUG */

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml)
{
        struct vm_list_struct **ppv;

        for (ppv = &mm->context.vmlist; *ppv; ppv = &(*ppv)->next)
                if ((*ppv)->vma->vm_start > vml->vma->vm_start)
                        break;

        vml->next = *ppv;
        *ppv = vml;
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
        struct vm_list_struct *loop, *vml;

        /* search the vm_start ordered list */
        vml = NULL;
        for (loop = mm->context.vmlist; loop; loop = loop->next) {
                if (loop->vma->vm_start > addr)
                        break;
                vml = loop;
        }

        if (vml && vml->vma->vm_end > addr)
                return vml->vma;

        return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
        return find_vma(mm, addr);
}

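/*
 * stacks cannot be grown on demand under NOMMU conditions: a fault beyond the
 * end of the allocated stack is fatal rather than a request for expansion
 */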
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
        return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
                                                    unsigned long addr)
{
        struct vm_list_struct *vml;

        /* search the vm_start ordered list */
        for (vml = mm->context.vmlist; vml; vml = vml->next) {
                if (vml->vma->vm_start == addr)
                        return vml->vma;
                if (vml->vma->vm_start > addr)
                        break;
        }

        return NULL;
}

/*
 * find a VMA in the global tree
 */
static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
{
        struct vm_area_struct *vma;
        struct rb_node *n = nommu_vma_tree.rb_node;

        while (n) {
                vma = rb_entry(n, struct vm_area_struct, vm_rb);

                if (start < vma->vm_start)
                        n = n->rb_left;
                else if (start > vma->vm_start)
                        n = n->rb_right;
                else
                        return vma;
        }

        return NULL;
}

/*
 * add a VMA in the global tree
 */
static void add_nommu_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *pvma;
        struct address_space *mapping;
        struct rb_node **p = &nommu_vma_tree.rb_node;
        struct rb_node *parent = NULL;

        /* add the VMA to the mapping */
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;

                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
        }

        /* add the VMA to the master list */
        while (*p) {
                parent = *p;
                pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

                if (vma->vm_start < pvma->vm_start) {
                        p = &(*p)->rb_left;
                } else if (vma->vm_start > pvma->vm_start) {
                        p = &(*p)->rb_right;
                } else {
                        /* mappings are at the same address - this can only
                         * happen for shared-mem chardevs and shared file
                         * mappings backed by ramfs/tmpfs */
                        BUG_ON(!(pvma->vm_flags & VM_SHARED));

                        if (vma < pvma)
                                p = &(*p)->rb_left;
                        else if (vma > pvma)
                                p = &(*p)->rb_right;
                        else
                                BUG();
                }
        }

        rb_link_node(&vma->vm_rb, parent, p);
        rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
}

/*
 * delete a VMA from the global tree
 */
static void delete_nommu_vma(struct vm_area_struct *vma)
{
        struct address_space *mapping;

        /* remove the VMA from the mapping */
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;

                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_remove(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
        }

        /* remove from the master list */
        rb_erase(&vma->vm_rb, &nommu_vma_tree);
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
                                 unsigned long addr,
                                 unsigned long len,
                                 unsigned long prot,
                                 unsigned long flags,
                                 unsigned long pgoff,
                                 unsigned long *_capabilities)
{
        unsigned long capabilities;
        unsigned long reqprot = prot;
        int ret;

        /* do the simple checks first */
        if (flags & MAP_FIXED || addr) {
                printk(KERN_DEBUG
                       "%d: Can't do fixed-address/overlay mmap of RAM\n",
                       current->pid);
                return -EINVAL;
        }

        if ((flags & MAP_TYPE) != MAP_PRIVATE &&
            (flags & MAP_TYPE) != MAP_SHARED)
                return -EINVAL;

        if (!len)
                return -EINVAL;

        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
        if (!len || len > TASK_SIZE)
                return -ENOMEM;

        /* offset overflow? */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
                return -EOVERFLOW;

        if (file) {
                /* validate file mapping requests */
                struct address_space *mapping;

                /* files must support mmap */
                if (!file->f_op || !file->f_op->mmap)
                        return -ENODEV;

                /* work out if what we've got could possibly be shared
                 * - we support chardevs that provide their own "memory"
                 * - we support files/blockdevs that are memory backed
                 */
                mapping = file->f_mapping;
                if (!mapping)
                        mapping = file->f_path.dentry->d_inode->i_mapping;

                capabilities = 0;
                if (mapping && mapping->backing_dev_info)
                        capabilities = mapping->backing_dev_info->capabilities;

                if (!capabilities) {
                        /* no explicit capabilities set, so assume some
                         * defaults */
                        switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
                        case S_IFREG:
                        case S_IFBLK:
                                capabilities = BDI_CAP_MAP_COPY;
                                break;

                        case S_IFCHR:
                                capabilities =
                                        BDI_CAP_MAP_DIRECT |
                                        BDI_CAP_READ_MAP |
                                        BDI_CAP_WRITE_MAP;
                                break;

                        default:
                                return -EINVAL;
                        }
                }

                /* eliminate any capabilities that we can't support on this
                 * device */
                if (!file->f_op->get_unmapped_area)
                        capabilities &= ~BDI_CAP_MAP_DIRECT;
                if (!file->f_op->read)
                        capabilities &= ~BDI_CAP_MAP_COPY;

                if (flags & MAP_SHARED) {
                        /* do checks for writing, appending and locking */
                        if ((prot & PROT_WRITE) &&
                            !(file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        if (IS_APPEND(file->f_path.dentry->d_inode) &&
                            (file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        if (locks_verify_locked(file->f_path.dentry->d_inode))
                                return -EAGAIN;

                        if (!(capabilities & BDI_CAP_MAP_DIRECT))
                                return -ENODEV;

                        if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
                            ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
                            ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
                            ) {
                                printk("MAP_SHARED not completely supported on !MMU\n");
                                return -EINVAL;
                        }

                        /* we mustn't privatise shared mappings */
                        capabilities &= ~BDI_CAP_MAP_COPY;
                } else {
                        /* we're going to read the file into private memory we
                         * allocate */
                        if (!(capabilities & BDI_CAP_MAP_COPY))
                                return -ENODEV;

                        /* we don't permit a private writable mapping to be
                         * shared with the backing device */
                        if (prot & PROT_WRITE)
                                capabilities &= ~BDI_CAP_MAP_DIRECT;
                }

                /* handle executable mappings and implied executable
                 * mappings */
                if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
                        if (prot & PROT_EXEC)
                                return -EPERM;
                } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
                        /* handle implication of PROT_EXEC by PROT_READ */
                        if (current->personality & READ_IMPLIES_EXEC) {
                                if (capabilities & BDI_CAP_EXEC_MAP)
                                        prot |= PROT_EXEC;
                        }
                } else if ((prot & PROT_READ) &&
                           (prot & PROT_EXEC) &&
                           !(capabilities & BDI_CAP_EXEC_MAP)
                           ) {
                        /* backing file is not executable, try to copy */
                        capabilities &= ~BDI_CAP_MAP_DIRECT;
                }
        } else {
                /* anonymous mappings are always memory backed and can be
                 * privately mapped
                 */
                capabilities = BDI_CAP_MAP_COPY;

                /* handle PROT_EXEC implication by PROT_READ */
                if ((prot & PROT_READ) &&
                    (current->personality & READ_IMPLIES_EXEC))
                        prot |= PROT_EXEC;
        }

        /* allow the security API to have its say */
        ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
        if (ret < 0)
                return ret;

        /* looks okay */
        *_capabilities = capabilities;
        return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
                                        unsigned long prot,
                                        unsigned long flags,
                                        unsigned long capabilities)
{
        unsigned long vm_flags;

        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
        vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        /* vm_flags |= mm->def_flags; */

        if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
                /* attempt to share read-only copies of mapped file chunks */
                if (file && !(prot & PROT_WRITE))
                        vm_flags |= VM_MAYSHARE;
        } else {
                /* overlay a shareable mapping on the backing device or inode
                 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
                 * romfs/cramfs */
                if (flags & MAP_SHARED)
                        vm_flags |= VM_MAYSHARE | VM_SHARED;
                else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
                        vm_flags |= VM_MAYSHARE;
        }

        /* refuse to let anyone share private mappings with this process if
         * it's being traced - otherwise breakpoints set in it may interfere
         * with another untraced process
         */
        if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
                vm_flags &= ~VM_MAYSHARE;

        return vm_flags;
}

/*
 * set up a shared mapping on a file
 */
static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
{
        int ret;

        ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
        if (ret != -ENOSYS)
                return ret;

        /* getting an ENOSYS error indicates that direct mmap isn't
         * possible (as opposed to tried but failed) so we'll fall
         * through to making a private copy of the data and mapping
         * that if we can */
        return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
{
        void *base;
        int ret;

        /* invoke the file's mapping function so that it can keep track of
         * shared mappings on devices or memory
         * - VM_MAYSHARE will be set if it may attempt to share
         */
        if (vma->vm_file) {
                ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
                if (ret != -ENOSYS) {
                        /* shouldn't return success if we're not sharing */
                        BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
                        return ret; /* success or a real error */
                }

                /* getting an ENOSYS error indicates that direct mmap isn't
                 * possible (as opposed to tried but failed) so we'll try to
                 * make a private copy of the data and map that instead */
        }

        /* allocate some memory to hold the mapping
         * - note that this may not return a page-aligned address if the object
         *   we're allocating is smaller than a page
         */
        base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
        if (!base)
                goto enomem;

        vma->vm_start = (unsigned long) base;
        vma->vm_end = vma->vm_start + len;
        vma->vm_flags |= VM_MAPPED_COPY;

#ifdef WARN_ON_SLACK
        if (len + WARN_ON_SLACK <= kobjsize(base))
                printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
                       len, current->pid, kobjsize(base) - len);
#endif

        if (vma->vm_file) {
                /* read the contents of a file into the copy */
                mm_segment_t old_fs;
                loff_t fpos;

                fpos = vma->vm_pgoff;
                fpos <<= PAGE_SHIFT;

                old_fs = get_fs();
                set_fs(KERNEL_DS);
                ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
                set_fs(old_fs);

                if (ret < 0)
                        goto error_free;

                /* clear the last little bit */
                if (ret < len)
                        memset(base + ret, 0, len - ret);

        } else {
                /* if it's an anonymous mapping, then just clear it */
                memset(base, 0, len);
        }

        return 0;

error_free:
        kfree(base);
        vma->vm_start = 0;
        return ret;

enomem:
        printk("Allocation of length %lu from process %d failed\n",
               len, current->pid);
        show_free_areas();
        return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
                            unsigned long addr,
                            unsigned long len,
                            unsigned long prot,
                            unsigned long flags,
                            unsigned long pgoff)
{
        struct vm_list_struct *vml = NULL;
        struct vm_area_struct *vma = NULL;
        struct rb_node *rb;
        unsigned long capabilities, vm_flags;
        void *result;
        int ret;

        if (!(flags & MAP_FIXED))
                addr = round_hint_to_min(addr);

        /* decide whether we should attempt the mapping, and if so what sort of
         * mapping */
        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
                                    &capabilities);
        if (ret < 0)
                return ret;

        /* we've determined that we can make the mapping, now translate what we
         * now know into VMA flags */
        vm_flags = determine_vm_flags(file, prot, flags, capabilities);

        /* we're going to need to record the mapping if it works */
        vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
        if (!vml)
                goto error_getting_vml;

        down_write(&nommu_vma_sem);

        /* if we want to share, we need to check for VMAs created by other
         * mmap() calls that overlap with our proposed mapping
         * - we can only share with an exact match on most regular files
         * - shared mappings on character devices and memory backed files are
         *   permitted to overlap inexactly as far as we are concerned, for in
         *   these cases sharing is handled in the driver or filesystem rather
         *   than here
         */
        if (vm_flags & VM_MAYSHARE) {
                unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long vmpglen;

                /* suppress VMA sharing for shared regions */
                if (vm_flags & VM_SHARED &&
                    capabilities & BDI_CAP_MAP_DIRECT)
                        goto dont_share_VMAs;

                for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
                        vma = rb_entry(rb, struct vm_area_struct, vm_rb);

                        if (!(vma->vm_flags & VM_MAYSHARE))
                                continue;

                        /* search for overlapping mappings on the same file */
                        if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
                                continue;

                        if (vma->vm_pgoff >= pgoff + pglen)
                                continue;

                        vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
                        vmpglen >>= PAGE_SHIFT;
                        if (pgoff >= vma->vm_pgoff + vmpglen)
                                continue;

                        /* handle inexactly overlapping matches between mappings */
                        if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
                                if (!(capabilities & BDI_CAP_MAP_DIRECT))
                                        goto sharing_violation;
                                continue;
                        }

                        /* we've found a VMA we can share */
                        atomic_inc(&vma->vm_usage);

                        vml->vma = vma;
                        result = (void *) vma->vm_start;
                        goto shared;
                }

        dont_share_VMAs:
                vma = NULL;

                /* obtain the address at which to make a shared mapping
                 * - this is the hook for quasi-memory character devices to
                 *   tell us the location of a shared mapping
                 */
                if (file && file->f_op->get_unmapped_area) {
                        addr = file->f_op->get_unmapped_area(file, addr, len,
                                                             pgoff, flags);
                        if (IS_ERR((void *) addr)) {
                                ret = addr;
                                if (ret != (unsigned long) -ENOSYS)
                                        goto error;

                                /* the driver refused to tell us where to site
                                 * the mapping so we'll have to attempt to copy
                                 * it */
                                ret = (unsigned long) -ENODEV;
                                if (!(capabilities & BDI_CAP_MAP_COPY))
                                        goto error;

                                capabilities &= ~BDI_CAP_MAP_DIRECT;
                        }
                }
        }

        /* we're going to need a VMA struct as well */
        vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!vma)
                goto error_getting_vma;

        INIT_LIST_HEAD(&vma->anon_vma_node);
        atomic_set(&vma->vm_usage, 1);
        if (file) {
                get_file(file);
                if (vm_flags & VM_EXECUTABLE) {
                        added_exe_file_vma(current->mm);
                        vma->vm_mm = current->mm;
                }
        }
        vma->vm_file    = file;
        vma->vm_flags   = vm_flags;
        vma->vm_start   = addr;
        vma->vm_end     = addr + len;
        vma->vm_pgoff   = pgoff;

        vml->vma = vma;

        /* set up the mapping */
        if (file && vma->vm_flags & VM_SHARED)
                ret = do_mmap_shared_file(vma, len);
        else
                ret = do_mmap_private(vma, len);
        if (ret < 0)
                goto error;

        /* okay... we have a mapping; now we have to register it */
        result = (void *) vma->vm_start;

        if (vma->vm_flags & VM_MAPPED_COPY) {
                realalloc += kobjsize(result);
                askedalloc += len;
        }

        realalloc += kobjsize(vma);
        askedalloc += sizeof(*vma);

        current->mm->total_vm += len >> PAGE_SHIFT;

        add_nommu_vma(vma);

 shared:
        realalloc += kobjsize(vml);
        askedalloc += sizeof(*vml);

        add_vma_to_mm(current->mm, vml);

        up_write(&nommu_vma_sem);

        if (prot & PROT_EXEC)
                flush_icache_range((unsigned long) result,
                                   (unsigned long) result + len);

#ifdef DEBUG
        printk("do_mmap:\n");
        show_process_blocks();
#endif

        return (unsigned long) result;

 error:
        up_write(&nommu_vma_sem);
        kfree(vml);
        if (vma) {
                if (vma->vm_file) {
                        fput(vma->vm_file);
                        if (vma->vm_flags & VM_EXECUTABLE)
                                removed_exe_file_vma(vma->vm_mm);
                }
                kfree(vma);
        }
        return ret;

 sharing_violation:
        up_write(&nommu_vma_sem);
        printk("Attempt to share mismatched mappings\n");
        kfree(vml);
        return -EINVAL;

 error_getting_vma:
        up_write(&nommu_vma_sem);
        kfree(vml);
        printk("Allocation of vma for %lu byte allocation from process %d failed\n",
               len, current->pid);
        show_free_areas();
        return -ENOMEM;

 error_getting_vml:
        printk("Allocation of vml for %lu byte allocation from process %d failed\n",
               len, current->pid);
        show_free_areas();
        return -ENOMEM;
}
EXPORT_SYMBOL(do_mmap_pgoff);

/*
 * handle mapping disposal for uClinux
 */
static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
        if (vma) {
                down_write(&nommu_vma_sem);

                if (atomic_dec_and_test(&vma->vm_usage)) {
                        delete_nommu_vma(vma);

                        if (vma->vm_ops && vma->vm_ops->close)
                                vma->vm_ops->close(vma);

                        /* IO memory and memory shared directly out of the
                         * pagecache from ramfs/tmpfs mustn't be released
                         * here */
                        if (vma->vm_flags & VM_MAPPED_COPY) {
                                realalloc -= kobjsize((void *) vma->vm_start);
                                askedalloc -= vma->vm_end - vma->vm_start;
                                kfree((void *) vma->vm_start);
                        }

                        realalloc -= kobjsize(vma);
                        askedalloc -= sizeof(*vma);

                        if (vma->vm_file) {
                                fput(vma->vm_file);
                                if (vma->vm_flags & VM_EXECUTABLE)
                                        removed_exe_file_vma(mm);
                        }
                        kfree(vma);
                }

                up_write(&nommu_vma_sem);
        }
}

/*
 * release a mapping
 * - under NOMMU conditions the parameters must match exactly to the mapping to
 *   be removed
 */
int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
        struct vm_list_struct *vml, **parent;
        unsigned long end = addr + len;

#ifdef DEBUG
        printk("do_munmap:\n");
#endif

        for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) {
                if ((*parent)->vma->vm_start > addr)
                        break;
                if ((*parent)->vma->vm_start == addr &&
                    ((len == 0) || ((*parent)->vma->vm_end == end)))
                        goto found;
        }

        printk("munmap of non-mmapped memory by process %d (%s): %p\n",
               current->pid, current->comm, (void *) addr);
        return -EINVAL;

 found:
        vml = *parent;

        put_vma(mm, vml->vma);

        *parent = vml->next;
        realalloc -= kobjsize(vml);
        askedalloc -= sizeof(*vml);
        kfree(vml);

        update_hiwater_vm(mm);
        mm->total_vm -= len >> PAGE_SHIFT;

#ifdef DEBUG
        show_process_blocks();
#endif

        return 0;
}
EXPORT_SYMBOL(do_munmap);

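/*
 * the munmap() system call entry point: take the mmap semaphore and defer to
 * do_munmap()
 */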
asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
        int ret;
        struct mm_struct *mm = current->mm;

        down_write(&mm->mmap_sem);
        ret = do_munmap(mm, addr, len);
        up_write(&mm->mmap_sem);
        return ret;
}

/*
 * Release all mappings
 */
void exit_mmap(struct mm_struct *mm)
{
        struct vm_list_struct *tmp;

        if (mm) {
#ifdef DEBUG
                printk("Exit_mmap:\n");
#endif

                mm->total_vm = 0;

                while ((tmp = mm->context.vmlist)) {
                        mm->context.vmlist = tmp->next;
                        put_vma(mm, tmp->vma);

                        realalloc -= kobjsize(tmp);
                        askedalloc -= sizeof(*tmp);
                        kfree(tmp);
                }

#ifdef DEBUG
                show_process_blocks();
#endif
        }
}

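/*
 * under NOMMU conditions the brk region is set up in full at exec time (see
 * mm->context.end_brk in sys_brk() above), so there is nothing for do_brk()
 * to extend it with
 */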
unsigned long do_brk(unsigned long addr, unsigned long len)
{
        return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the hole allocated by the kmalloc() call in
 * do_mmap_pgoff() and the block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
                        unsigned long old_len, unsigned long new_len,
                        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;

        /* insanity checks first */
        if (new_len == 0)
                return (unsigned long) -EINVAL;

        if (flags & MREMAP_FIXED && new_addr != addr)
                return (unsigned long) -EINVAL;

        vma = find_vma_exact(current->mm, addr);
        if (!vma)
                return (unsigned long) -EINVAL;

        if (vma->vm_end != vma->vm_start + old_len)
                return (unsigned long) -EFAULT;

        if (vma->vm_flags & VM_MAYSHARE)
                return (unsigned long) -EPERM;

        if (new_len > kobjsize((void *) addr))
                return (unsigned long) -ENOMEM;

        /* all checks complete - do it */
        vma->vm_end = vma->vm_start + new_len;

        askedalloc -= old_len;
        askedalloc += new_len;

        return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);

asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        unsigned long ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
}

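/*
 * there are no page tables to walk, so looking up the page backing an address
 * through a VMA always comes back empty-handed
 */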
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                        unsigned int foll_flags)
{
        return NULL;
}

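/*
 * with no page tables to fill in, "remapping" a PFN range just points the VMA
 * directly at the physical address
 */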
int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                unsigned long to, unsigned long size, pgprot_t prot)
{
        vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
        return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                        unsigned long pgoff)
{
        unsigned int size = vma->vm_end - vma->vm_start;

        if (!(vma->vm_flags & VM_USERMAP))
                return -EINVAL;

        vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
        vma->vm_end = vma->vm_start + size;

        return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

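/*
 * the operations below have no work to do (or no way to succeed) when there's
 * no MMU, so they are stubbed out
 */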
void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
                         loff_t const holebegin, loff_t const holelen,
                         int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * ask for an unmapped area at which to create a mapping on a file
 */
unsigned long get_unmapped_area(struct file *file, unsigned long addr,
                                unsigned long len, unsigned long pgoff,
                                unsigned long flags)
{
        unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
                                  unsigned long, unsigned long);

        get_area = current->mm->get_unmapped_area;
        if (file && file->f_op && file->f_op->get_unmapped_area)
                get_area = file->f_op->get_unmapped_area;

        if (!get_area)
                return -ENOSYS;

        return get_area(file, addr, len, pgoff, flags);
}
EXPORT_SYMBOL(get_unmapped_area);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        unsigned long free, allowed;

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                unsigned long n;

                free = global_page_state(NR_FILE_PAGES);
                free += nr_swap_pages;

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 */
                free += global_page_state(NR_SLAB_RECLAIMABLE);

                /*
                 * Leave the last 3% for root
                 */
                if (!cap_sys_admin)
                        free -= free / 32;

                if (free > pages)
                        return 0;

                /*
                 * nr_free_pages() is very expensive on large systems,
                 * only call if we're about to fail.
                 */
                n = nr_free_pages();

                /*
                 * Leave reserved pages; they are not available for
                 * anonymous pages.
                 */
                if (n <= totalreserve_pages)
                        goto error;
                else
                        n -= totalreserve_pages;

                /*
                 * Leave the last 3% for root
                 */
                if (!cap_sys_admin)
                        n -= n / 32;
                free += n;

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = totalram_pages * sysctl_overcommit_ratio / 100;
        /*
         * Leave the last 3% for root
         */
        if (!cap_sys_admin)
                allowed -= allowed / 32;
        allowed += total_swap_pages;

        /* Don't let a single process grow too big:
           leave 3% of the size of this process for other processes */
        if (mm)
                allowed -= mm->total_vm / 32;

        /*
         * cast `allowed' as a signed long because vm_committed_space
         * sometimes has a negative value
         */
        if (atomic_long_read(&vm_committed_space) < (long)allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}

int in_gate_area_no_task(unsigned long addr)
{
        return 0;
}

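/*
 * page faults are never generated under NOMMU conditions, so filemap_fault()
 * should never be called
 */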
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        BUG();
        return 0;
}
EXPORT_SYMBOL(filemap_fault);

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm;

        if (addr + len < addr)
                return 0;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        down_read(&mm->mmap_sem);

        /* the access must start within one of the target process's mappings */
        vma = find_vma(mm, addr);
        if (vma) {
                /* don't overrun this mapping */
                if (addr + len >= vma->vm_end)
                        len = vma->vm_end - addr;

                /* only read or write mappings where it is permitted */
                if (write && vma->vm_flags & VM_MAYWRITE)
                        len -= copy_to_user((void *) addr, buf, len);
                else if (!write && vma->vm_flags & VM_MAYREAD)
                        len -= copy_from_user(buf, (void *) addr, len);
                else
                        len = 0;
        } else {
                len = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
        return len;
}