/*
 *      linux/mm/msync.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <linux/sched.h>

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
 * Nor does it mark the relevant pages dirty (it used to, up to 2.6.17).
 * Now it doesn't do anything, since dirty pages are properly tracked.
 *
 * The application may now run fsync() to write out the dirty pages, wait
 * on the writeout and check the result.  Or the application may run
 * fadvise(FADV_DONTNEED) against the fd to start async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
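
/*
 * For illustration, a minimal sketch of the userspace pattern described
 * above, assuming `map` is a MAP_SHARED file-backed mapping of `len`
 * bytes created from descriptor `fd`:
 *
 *	msync(map, len, MS_ASYNC);	(no I/O is started here any more)
 *	fsync(fd);			(write out dirty pages, wait, check result)
 *
 * or, to start asynchronous writeout without waiting:
 *
 *	posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
 *
 * posix_fadvise(POSIX_FADV_DONTNEED) is the userspace spelling of the
 * fadvise(FADV_DONTNEED) call mentioned above.
 */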
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
        unsigned long end;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int unmapped_error = 0;
        int error = -EINVAL;

        if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
                goto out;
        if (start & ~PAGE_MASK)
                goto out;
        if ((flags & MS_ASYNC) && (flags & MS_SYNC))
                goto out;
        error = -ENOMEM;
        /* Round len up to a whole number of pages. */
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                goto out;
        error = 0;
        if (end == start)
                goto out;
        /*
         * If the interval [start,end) covers some unmapped address ranges,
         * just ignore them, but return -ENOMEM at the end.
         */
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);
        for (;;) {
                struct file *file;

                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
                        goto out_unlock;
                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
                        start = vma->vm_start;
                        if (start >= end)
                                goto out_unlock;
                        unmapped_error = -ENOMEM;
                }
                /* Here vma->vm_start <= start < vma->vm_end. */
                if ((flags & MS_INVALIDATE) &&
                                (vma->vm_flags & VM_LOCKED)) {
                        error = -EBUSY;
                        goto out_unlock;
                }
                file = vma->vm_file;
                start = vma->vm_end;
                if ((flags & MS_SYNC) && file &&
                                (vma->vm_flags & VM_SHARED)) {
                        /*
                         * Drop mmap_sem across the fsync; the mapping may
                         * change meanwhile, so re-look the vma up afterwards.
                         */
                        get_file(file);
                        up_read(&mm->mmap_sem);
                        error = do_fsync(file, 0);
                        fput(file);
                        if (error || start >= end)
                                goto out;
                        down_read(&mm->mmap_sem);
                        vma = find_vma(mm, start);
                } else {
                        if (start >= end) {
                                error = 0;
                                goto out_unlock;
                        }
                        vma = vma->vm_next;
                }
        }
out_unlock:
        up_read(&mm->mmap_sem);
out:
        /* Report -ENOMEM from unmapped holes only if nothing else failed. */
        return error ? : unmapped_error;
}
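
/*
 * For illustration, a self-contained userspace sketch of the semantics
 * implemented above (the file name and sizes are arbitrary assumptions):
 * MS_SYNC writes out and waits on every shared file mapping in the range,
 * unmapped holes are skipped but turn the return value into -ENOMEM, and
 * MS_INVALIDATE on a locked mapping fails with -EBUSY.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("data.bin", O_RDWR | O_CREAT, 0644);
 *		char *map;
 *
 *		if (fd < 0 || ftruncate(fd, 4096) < 0)
 *			return 1;
 *		map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, 0);
 *		if (map == MAP_FAILED)
 *			return 1;
 *		strcpy(map, "hello");
 *		if (msync(map, 4096, MS_SYNC) < 0)
 *			perror("msync");
 *		munmap(map, 4096);
 *		close(fd);
 *		return 0;
 *	}
 */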