/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */
#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);
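/*
 * Illustrative sketch (not part of this file): can_do_mlock() succeeds when
 * the caller either has CAP_IPC_LOCK or a non-zero RLIMIT_MEMLOCK soft limit.
 * From userspace that limit is the one reported by getrlimit()/setrlimit();
 * the snippet below only shows how a process might inspect it before calling
 * mlock(), assuming the usual libc wrappers.
 */
#if 0	/* userspace example, not kernel code */
#include <sys/resource.h>
#include <stdio.h>

static void show_memlock_limit(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
		printf("RLIMIT_MEMLOCK soft limit: %llu bytes\n",
		       (unsigned long long)rl.rlim_cur);
}
#endif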
#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * vmscan statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */
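/*
 * Illustrative sketch (not part of this file): the check described above, as
 * a hypothetical helper.  A lazy-mlock path takes the mmap_sem for read so
 * that vm_flags cannot change underneath it, then re-verifies VM_LOCKED
 * before treating the page as mlocked.  The helper name and calling context
 * are assumptions for illustration only.
 */
#if 0
static int vma_still_mlocked(struct mm_struct *mm, struct vm_area_struct *vma)
{
	int locked = 0;

	if (down_read_trylock(&mm->mmap_sem)) {
		/* vm_flags is stable while mmap_sem is held */
		locked = !!(vma->vm_flags & VM_LOCKED);
		up_read(&mm->mmap_sem);
	}
	return locked;
}
#endif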
/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}
	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race; the page already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}
/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}
/*
 * called from munlock()/munmap() path with page supposedly on the LRU.
 *
 * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked
 * [in try_to_munlock()] and then attempt to isolate the page.  We must
 * isolate the page to keep others from messing with its unevictable
 * and mlocked state while trying to munlock.  However, we pre-clear the
 * mlocked state anyway as we might lose the isolation race and we might
 * not get another chance to clear PageMlocked.  If we successfully
 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
 * mapping the page, it will restore the PageMlocked state, unless the page
 * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
 * perhaps redundantly.
 *
 * If we lose the isolation race, and the page is mapped by other VM_LOCKED
 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap(),
 * either of which will restore the PageMlocked state by calling
 * mlock_vma_page() above, if it can grab the vma's mmap sem.
 */
static void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * We lost the race.  Let try_to_unmap() deal
			 * with it.  At least we get the page state and
			 * mlock stats right.  However, page is still on
			 * the unevictable list.  We'll fix that up when
			 * the page is eventually freed or we scan the
			 * unevictable list.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}
/**
 * __mlock_vma_pages_range() - mlock/munlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @mlock: 0 indicates munlock, otherwise mlock.
 *
 * If @mlock == 0, unlock an mlocked range; else mlock the range of pages.
 * This takes care of making the pages present, too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   int mlock)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	struct page *pages[16]; /* 16 gives a reasonable batch */
	int nr_pages = (end - start) / PAGE_SIZE;
	int ret = 0;
	int gup_flags = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
		  (atomic_read(&mm->mm_users) != 0));

	/*
	 * mlock:   don't page populate if vma has PROT_NONE permission.
	 * munlock: always do munlock although the vma has PROT_NONE
	 *          permission, or SIGKILL is pending.
	 */
	if (!mlock)
		gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
			     GUP_FLAGS_IGNORE_SIGKILL;
	if (vma->vm_flags & VM_WRITE)
		gup_flags |= GUP_FLAGS_WRITE;

	while (nr_pages > 0) {
		int i;

		cond_resched();
		/*
		 * get_user_pages makes pages present if we are
		 * setting mlock, and this extra reference count will
		 * disable migration of this page.  However, page may
		 * still be truncated out from under us.
		 */
		ret = __get_user_pages(current, mm, addr,
				min_t(int, nr_pages, ARRAY_SIZE(pages)),
				gup_flags, pages, NULL);
		/*
		 * This can happen for, e.g., VM_NONLINEAR regions before
		 * a page has been allocated and mapped at a given offset,
		 * or for addresses that map beyond end of a file.
		 * We'll mlock the pages if/when they get faulted in.
		 */
		if (ret < 0)
			break;
		if (ret == 0) {
			/*
			 * We know the vma is there, so the only time
			 * we cannot get a single page should be an
			 * error (ret < 0) case.
			 */
			WARN_ON(1);
			break;
		}

		lru_add_drain();	/* push cached pages to LRU */

		for (i = 0; i < ret; i++) {
			struct page *page = pages[i];

			lock_page(page);
			/*
			 * Because we lock page here and migration is blocked
			 * by the elevated reference, we need only check for
			 * page truncation (file-cache only).
			 */
			if (page->mapping) {
				if (mlock)
					mlock_vma_page(page);
				else
					munlock_vma_page(page);
			}
			unlock_page(page);
			put_page(page);		/* ref from get_user_pages() */

			/*
			 * here we assume that get_user_pages() has given us
			 * a list of virtually contiguous pages.
			 */
			addr += PAGE_SIZE;	/* for next get_user_pages() */
			nr_pages--;
		}
		ret = 0;
	}

	return ret;	/* count entire vma as locked_vm */
}
/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}
#else /* CONFIG_UNEVICTABLE_LRU */

/*
 * Just make pages present if VM_LOCKED.  No-op if unlocking.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   int mlock)
{
	if (mlock && (vma->vm_flags & VM_LOCKED))
		return make_pages_present(start, end);
	return 0;
}

static inline int __mlock_posix_error_return(long retval)
{
	return 0;
}
#endif /* CONFIG_UNEVICTABLE_LRU */
/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end   - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current))) {

		__mlock_vma_pages_range(vma, start, end, 1);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}
/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
{
	vma->vm_flags &= ~VM_LOCKED;
	__mlock_vma_pages_range(vma, start, end, 0);
}
/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags ||
			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;	/* don't set VM_LOCKED, don't count */
	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)) {
		if (lock)
			make_pages_present(start, end);
		goto out;	/* don't set VM_LOCKED, don't count */
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}
	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}
	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */
	vma->vm_flags = newflags;

	if (lock) {
		ret = __mlock_vma_pages_range(vma, start, end, 1);
		if (ret > 0) {
			mm->locked_vm -= ret;
			ret = 0;
		} else
			ret = __mlock_posix_error_return(ret); /* translate if needed */
	} else
		__mlock_vma_pages_range(vma, start, end, 0);

out:
	*prev = vma;
	return ret;
}
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;
		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}
SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
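/*
 * Illustrative sketch (not part of this file): typical userspace use of the
 * mlock()/munlock() system calls defined above.  The kernel rounds the range
 * out to page boundaries itself, and failures surface as EPERM, ENOMEM or
 * EAGAIN per the POSIX error translation earlier in this file.
 */
#if 0	/* userspace example, not kernel code */
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t len = 16 * 4096;
	void *buf = malloc(len);

	if (!buf)
		return 1;
	if (mlock(buf, len) != 0)	/* pin the pages */
		perror("mlock");
	/* ... work on buf while it is guaranteed resident ... */
	if (munlock(buf, len) != 0)
		perror("munlock");
	free(buf);
	return 0;
}
#endif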
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;
	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
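/*
 * Illustrative sketch (not part of this file): userspace use of mlockall()
 * and munlockall().  MCL_CURRENT locks everything currently mapped;
 * MCL_FUTURE sets mm->def_flags so later mappings are locked as they are
 * created, as do_mlockall() above shows.
 */
#if 0	/* userspace example, not kernel code */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
		perror("mlockall");
	/* ... latency-sensitive work with no major page faults ... */
	if (munlockall() != 0)
		perror("munlockall");
	return 0;
}
#endif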
/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
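/*
 * Illustrative sketch (not part of this file): the user_struct accounting
 * above is driven by SysV shm, e.g. shmctl(SHM_LOCK).  The userspace snippet
 * below shows one way such a segment gets locked; error handling is trimmed
 * for brevity.
 */
#if 0	/* userspace example, not kernel code */
#include <sys/ipc.h>
#include <sys/shm.h>
#include <stdio.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0)
		return 1;
	if (shmctl(id, SHM_LOCK, NULL) != 0)	/* accounted via user_shm_lock() */
		perror("shmctl(SHM_LOCK)");
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
#endif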
void *alloc_locked_buffer(size_t size)
{
	unsigned long rlim, vm, pgsz;
	void *buffer = NULL;

	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);
	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->total_vm + pgsz;
	if (rlim < vm)
		goto out;
	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm   = current->mm->locked_vm + pgsz;
	if (rlim < vm)
		goto out;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out;

	current->mm->total_vm  += pgsz;
	current->mm->locked_vm += pgsz;
out:
	up_write(&current->mm->mmap_sem);
	return buffer;
}
void release_locked_buffer(void *buffer, size_t size)
{
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);
	current->mm->total_vm  -= pgsz;
	current->mm->locked_vm -= pgsz;
	up_write(&current->mm->mmap_sem);
}
void free_locked_buffer(void *buffer, size_t size)
{
	release_locked_buffer(buffer, size);
	kfree(buffer);
}
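/*
 * Illustrative sketch (not part of this file): how a kernel caller might pair
 * alloc_locked_buffer() with free_locked_buffer().  The surrounding driver
 * context is an assumption for illustration only.
 */
#if 0
static int example_use_locked_buffer(size_t len)
{
	void *buf = alloc_locked_buffer(len);	/* charged to locked_vm */

	if (!buf)
		return -ENOMEM;
	/* ... fill and hand the buffer to userspace ... */
	free_locked_buffer(buf, len);		/* uncharge and kfree */
	return 0;
}
#endif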