/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	int new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		new_flags &= ~VM_DONTCOPY;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
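
/*
 * Illustrative sketch, not part of this file: advice applied to a
 * sub-range of a mapping is what drives the vma_merge()/split_vma()
 * dance above.  Userspace code kept under "#if 0"; the helper name and
 * the 4 KiB page size are assumptions for the example.
 */
#if 0
#include <sys/mman.h>
#include <stddef.h>

static int example_split(void)
{
	size_t pagesz = 4096;	/* assumed page size */
	char *p = mmap(NULL, 8 * pagesz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return -1;
	/*
	 * Advising only pages 2..3 splits the single 8-page VMA into
	 * three: [0,2), [2,4) carrying VM_RAND_READ, and [4,8).
	 */
	return madvise(p + 2 * pagesz, 2 * pagesz, MADV_RANDOM);
}
#endif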

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_page) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping,
			file, start, max_sane_readahead(end - start));
	return 0;
}
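
/*
 * Illustrative sketch, not part of this file: MADV_WILLNEED on a
 * file-backed mapping kicks off page cache readahead and returns
 * immediately (anonymous maps get -EBADF above).  Userspace code under
 * "#if 0"; the helper name is an assumption.
 */
#if 0
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stddef.h>

static int example_willneed(const char *path)
{
	struct stat st;
	void *p;
	int fd = open(path, O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0)
		return -1;
	p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return -1;
	/* Queues the readahead I/O; does not wait for it to finish. */
	return madvise(p, st.st_size, MADV_WILLNEED);
}
#endif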

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for refill_inactive to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * refill_inactive to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}
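
/*
 * Illustrative sketch, not part of this file: MADV_DONTNEED discards
 * anonymous pages instead of swapping them out; the next touch yields
 * fresh zero-filled memory.  Userspace code under "#if 0"; the helper
 * name is an assumption.
 */
#if 0
#include <sys/mman.h>
#include <assert.h>
#include <stddef.h>

static void example_dontneed(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return;
	p[0] = 42;
	madvise(p, len, MADV_DONTNEED);
	assert(p[0] == 0);	/* the dirty page was thrown away */
}
#endif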

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	if (!vma->vm_file || !vma->vm_file->f_mapping
		|| !vma->vm_file->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	mapping = vma->vm_file->f_mapping;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	return vmtruncate_range(mapping->host, offset, endoff);
}
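
/*
 * Illustrative sketch, not part of this file: punching a hole in a
 * tmpfs-backed shared mapping.  The /dev/shm path, helper name, and
 * 4 KiB page size are assumptions; the mapping must be shared and
 * writable or the check above returns -EACCES.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stddef.h>

static int example_remove(void)
{
	size_t pagesz = 4096;	/* assumed page size */
	int fd = open("/dev/shm/example", O_RDWR | O_CREAT, 0600);
	char *p;

	if (fd < 0 || ftruncate(fd, 16 * pagesz) < 0)
		return -1;
	p = mmap(NULL, 16 * pagesz, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return -1;
	/* Frees pages 1..8 and their backing store in one call. */
	return madvise(p + pagesz, 8 * pagesz, MADV_REMOVE);
}
#endif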

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
	    unsigned long start, unsigned long end, int behavior)
{
	long error;

	switch (behavior) {
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			break;
		}
		/* fall through to the common madvise_behavior() call */
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
		error = madvise_behavior(vma, prev, start, end, behavior);
		break;
	case MADV_REMOVE:
		error = madvise_remove(vma, start, end);
		break;

	case MADV_WILLNEED:
		error = madvise_willneed(vma, prev, start, end);
		break;

	case MADV_DONTNEED:
		error = madvise_dontneed(vma, prev, start, end);
		break;

	default:
		error = -EINVAL;
		break;
	}
	return error;
}
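
/*
 * Illustrative sketch, not part of this file: MADV_DONTFORK marks a
 * range VM_DONTCOPY so a fork()ed child does not inherit it, e.g. for
 * buffers registered for DMA.  Userspace code under "#if 0"; the helper
 * name is an assumption.
 */
#if 0
#include <sys/mman.h>
#include <unistd.h>
#include <stddef.h>

static void example_dontfork(char *buf, size_t len)
{
	madvise(buf, len, MADV_DONTFORK);
	if (fork() == 0) {
		/* buf..buf+len is not mapped here in the child;
		 * touching it would fault. */
		_exit(0);
	}
}
#endif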

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	size_t len;

	down_write(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	/* Round len_in up to a page multiple (~PAGE_MASK == PAGE_SIZE - 1). */
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		vma = prev->vm_next;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
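
/*
 * Illustrative sketch, not part of this file: the VMA walk above applies
 * advice to every mapped page in [start,end) but still reports a hole
 * with -ENOMEM, so userspace sees errno == ENOMEM even though the advice
 * took effect on the mapped parts.  Userspace code under "#if 0"; the
 * helper name and memory layout are assumptions.
 */
#if 0
#include <sys/mman.h>
#include <stddef.h>

static int example_gap(char *base, size_t pagesz)
{
	/* Assume base..base+3*pagesz is mapped; unmap the middle page. */
	munmap(base + pagesz, pagesz);
	/* Advises pages 0 and 2; returns -1 with errno == ENOMEM. */
	return madvise(base, 3 * pagesz, MADV_RANDOM);
}
#endif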