4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 * Swap reorganised 29.12.95, Stephen Tweedie.
7 * kswapd added: 7.1.96 sct
8 * Removed kswapd_ctl limits, and swap out as many pages as needed
9 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
10 * Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
11 * Multiqueue VM started 5.8.00, Rik van Riel.
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/swap.h>
19 #include <linux/pagemap.h>
20 #include <linux/init.h>
21 #include <linux/highmem.h>
22 #include <linux/vmstat.h>
23 #include <linux/file.h>
24 #include <linux/writeback.h>
25 #include <linux/blkdev.h>
26 #include <linux/buffer_head.h> /* for try_to_release_page(),
27 buffer_heads_over_limit */
28 #include <linux/mm_inline.h>
29 #include <linux/pagevec.h>
30 #include <linux/backing-dev.h>
31 #include <linux/rmap.h>
32 #include <linux/topology.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/notifier.h>
36 #include <linux/rwsem.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/freezer.h>
40 #include <linux/memcontrol.h>
42 #include <asm/tlbflush.h>
43 #include <asm/div64.h>
45 #include <linux/swapops.h>
50 /* Incremented by the number of inactive pages that were scanned */
51 unsigned long nr_scanned;
53 /* This context's GFP mask */
58 /* Can pages be swapped as part of reclaim? */
61 /* This context's SWAP_CLUSTER_MAX. If freeing memory for
62 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
63 * In this context, it doesn't matter that we scan the
64 * whole list at once. */
69 int all_unreclaimable;
73 /* Which cgroup do we reclaim from */
74 struct mem_cgroup *mem_cgroup;
76 /* Pluggable isolate pages callback */
77 unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
78 unsigned long *scanned, int order, int mode,
79 struct zone *z, struct mem_cgroup *mem_cont,
83 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
85 #ifdef ARCH_HAS_PREFETCH
86 #define prefetch_prev_lru_page(_page, _base, _field) \
88 if ((_page)->lru.prev != _base) { \
91 prev = lru_to_page(&(_page->lru)); \
92 prefetch(&prev->_field); \
96 #define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
99 #ifdef ARCH_HAS_PREFETCHW
100 #define prefetchw_prev_lru_page(_page, _base, _field) \
102 if ((_page)->lru.prev != _base) { \
105 prev = lru_to_page(&(_page->lru)); \
106 prefetchw(&prev->_field); \
110 #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
114 * From 0 .. 100. Higher means more swappy.
116 int vm_swappiness = 60;
117 long vm_total_pages; /* The total number of pages which the VM controls */
119 static LIST_HEAD(shrinker_list);
120 static DECLARE_RWSEM(shrinker_rwsem);
123 * Add a shrinker callback to be called from the vm
125 void register_shrinker(struct shrinker *shrinker)
128 down_write(&shrinker_rwsem);
129 list_add_tail(&shrinker->list, &shrinker_list);
130 up_write(&shrinker_rwsem);
132 EXPORT_SYMBOL(register_shrinker);
137 void unregister_shrinker(struct shrinker *shrinker)
139 down_write(&shrinker_rwsem);
140 list_del(&shrinker->list);
141 up_write(&shrinker_rwsem);
143 EXPORT_SYMBOL(unregister_shrinker);
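/*
 * Illustrative sketch, not part of this file: how a cache would hook into the
 * shrinker interface above.  It assumes the struct shrinker of this era (a
 * ->shrink(nr_to_scan, gfp_mask) callback plus a seeks cost); my_cache_evict()
 * and my_cache_object_count() are hypothetical helpers.
 */
static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;		/* refuse: cannot recurse into the fs */
		my_cache_evict(nr_to_scan);	/* drop up to nr_to_scan objects */
	}
	return my_cache_object_count();		/* objects still cached */
}

static struct shrinker my_cache_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&my_cache_shrinker) at init time and
 * unregister_shrinker(&my_cache_shrinker) at teardown pair with the above. */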
145 #define SHRINK_BATCH 128
147 * Call the shrink functions to age shrinkable caches
149 * Here we assume it costs one seek to replace a lru page and that it also
150 * takes a seek to recreate a cache object. With this in mind we age equal
151 * percentages of the lru and ageable caches. This should balance the seeks
152 * generated by these structures.
154 * If the vm encountered mapped pages on the LRU it increases the pressure on
155 * slab to avoid swapping.
157 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
159 * `lru_pages' represents the number of on-LRU pages in all the zones which
160 * are eligible for the caller's allocation attempt. It is used for balancing
161 * slab reclaim versus page reclaim.
163 * Returns the number of slab objects which we shrunk.
165 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
166 unsigned long lru_pages)
168 struct shrinker *shrinker;
169 unsigned long ret = 0;
172 scanned = SWAP_CLUSTER_MAX;
174 if (!down_read_trylock(&shrinker_rwsem))
175 return 1; /* Assume we'll be able to shrink next time */
177 list_for_each_entry(shrinker, &shrinker_list, list) {
178 unsigned long long delta;
179 unsigned long total_scan;
180 unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
182 delta = (4 * scanned) / shrinker->seeks;
184 do_div(delta, lru_pages + 1);
185 shrinker->nr += delta;
186 if (shrinker->nr < 0) {
187 printk(KERN_ERR "%s: nr=%ld\n",
188 __FUNCTION__, shrinker->nr);
189 shrinker->nr = max_pass;
193 * Avoid risking looping forever due to too large nr value:
194 * never try to free more than twice the estimated number of freeable entries.
197 if (shrinker->nr > max_pass * 2)
198 shrinker->nr = max_pass * 2;
200 total_scan = shrinker->nr;
203 while (total_scan >= SHRINK_BATCH) {
204 long this_scan = SHRINK_BATCH;
208 nr_before = (*shrinker->shrink)(0, gfp_mask);
209 shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
210 if (shrink_ret == -1)
212 if (shrink_ret < nr_before)
213 ret += nr_before - shrink_ret;
214 count_vm_events(SLABS_SCANNED, this_scan);
215 total_scan -= this_scan;
220 shrinker->nr += total_scan;
222 up_read(&shrinker_rwsem);
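/*
 * Worked example of the aging arithmetic above (illustrative numbers, and
 * assuming delta is also scaled by max_pass before the division, as the
 * 32-bit overflow comment implies): with scanned = 1024 LRU pages,
 * lru_pages = 100000, seeks = 2 and a cache reporting max_pass = 50000
 * objects,
 *
 *	delta = (4 * 1024 / 2) * 50000 / (100000 + 1) ~= 1023
 *
 * so roughly 2% of the cache is aged while roughly 1% of the LRU was
 * scanned, the factor of two coming from 4/seeks.  The work is then carried
 * out in SHRINK_BATCH (128) sized chunks by the loop above.
 */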
226 /* Called without any lock held on whether the page is mapped, so the answer is unstable */
227 static inline int page_mapping_inuse(struct page *page)
229 struct address_space *mapping;
231 /* Page is in somebody's page tables. */
232 if (page_mapped(page))
235 /* Be more reluctant to reclaim swapcache than pagecache */
236 if (PageSwapCache(page))
239 mapping = page_mapping(page);
243 /* File is mmap'd by somebody? */
244 return mapping_mapped(mapping);
247 static inline int is_page_cache_freeable(struct page *page)
249 return page_count(page) - !!PagePrivate(page) == 2;
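/*
 * Explanatory note (added): a freeable pagecache page is referenced only by
 * the page cache itself and by the caller that isolated it from the LRU,
 * hence page_count() == 2; buffer heads at page->private (PagePrivate) hold
 * one extra reference, which the !!PagePrivate(page) term cancels out.
 */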
252 static int may_write_to_queue(struct backing_dev_info *bdi)
254 if (current->flags & PF_SWAPWRITE)
256 if (!bdi_write_congested(bdi))
258 if (bdi == current->backing_dev_info)
264 * We detected a synchronous write error writing a page out. Probably
265 * -ENOSPC. We need to propagate that into the address_space for a subsequent
266 * fsync(), msync() or close().
268 * The tricky part is that after writepage we cannot touch the mapping: nothing
269 * prevents it from being freed up. But we have a ref on the page and once
270 * that page is locked, the mapping is pinned.
272 * We're allowed to run sleeping lock_page() here because we know the caller has __GFP_FS.
275 static void handle_write_error(struct address_space *mapping,
276 struct page *page, int error)
279 if (page_mapping(page) == mapping)
280 mapping_set_error(mapping, error);
284 /* Request for sync pageout. */
290 /* possible outcome of pageout() */
292 /* failed to write page out, page is locked */
294 /* move page to the active list, page is locked */
296 /* page has been sent to the disk successfully, page is unlocked */
298 /* page is clean and locked */
303 * pageout is called by shrink_page_list() for each dirty page.
304 * Calls ->writepage().
306 static pageout_t pageout(struct page *page, struct address_space *mapping,
307 enum pageout_io sync_writeback)
310 * If the page is dirty, only perform writeback if that write
311 * will be non-blocking, to prevent this allocation from being
312 * stalled by pagecache activity. But note that there may be
313 * stalls if we need to run get_block(). We could test
314 * PagePrivate for that.
316 * If this process is currently in generic_file_write() against
317 * this page's queue, we can perform writeback even if that would block.
320 * If the page is swapcache, write it back even if that would
321 * block, for some throttling. This happens by accident, because
322 * swap_backing_dev_info is bust: it doesn't reflect the
323 * congestion state of the swapdevs. Easy to fix, if needed.
324 * See swapfile.c:page_queue_congested().
326 if (!is_page_cache_freeable(page))
330 * Some data journaling orphaned pages can have
331 * page->mapping == NULL while being dirty with clean buffers.
333 if (PagePrivate(page)) {
334 if (try_to_free_buffers(page)) {
335 ClearPageDirty(page);
336 printk("%s: orphaned page\n", __FUNCTION__);
342 if (mapping->a_ops->writepage == NULL)
343 return PAGE_ACTIVATE;
344 if (!may_write_to_queue(mapping->backing_dev_info))
347 if (clear_page_dirty_for_io(page)) {
349 struct writeback_control wbc = {
350 .sync_mode = WB_SYNC_NONE,
351 .nr_to_write = SWAP_CLUSTER_MAX,
353 .range_end = LLONG_MAX,
358 SetPageReclaim(page);
359 res = mapping->a_ops->writepage(page, &wbc);
361 handle_write_error(mapping, page, res);
362 if (res == AOP_WRITEPAGE_ACTIVATE) {
363 ClearPageReclaim(page);
364 return PAGE_ACTIVATE;
368 * Wait on writeback if requested to. This happens when
369 * direct reclaiming a large contiguous area and the
370 * first attempt to free a range of pages fails.
372 if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
373 wait_on_page_writeback(page);
375 if (!PageWriteback(page)) {
376 /* synchronous write or broken a_ops? */
377 ClearPageReclaim(page);
379 inc_zone_page_state(page, NR_VMSCAN_WRITE);
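/*
 * Illustrative sketch, not part of this file: the rough shape of an
 * a_ops->writepage implementation as pageout() above drives it for reclaim.
 * my_fs_ready_to_write() and my_fs_start_io() are hypothetical helpers; this
 * only sketches the contract (either start I/O and unlock, or re-dirty the
 * page and return AOP_WRITEPAGE_ACTIVATE with the page still locked), it is
 * not a reference implementation.
 */
static int my_fs_writepage(struct page *page, struct writeback_control *wbc)
{
	if (wbc->for_reclaim && !my_fs_ready_to_write(page)) {
		set_page_dirty(page);		/* give the dirty bit back */
		return AOP_WRITEPAGE_ACTIVATE;	/* page stays locked */
	}
	set_page_writeback(page);
	unlock_page(page);
	my_fs_start_io(page);	/* must end with end_page_writeback() */
	return 0;
}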
387 * Attempt to detach a locked page from its ->mapping. If it is dirty or if
388 * someone else has a ref on the page, abort and return 0. If it was
389 * successfully detached, return 1. Assumes the caller has a single ref on this page.
392 int remove_mapping(struct address_space *mapping, struct page *page)
394 BUG_ON(!PageLocked(page));
395 BUG_ON(mapping != page_mapping(page));
397 write_lock_irq(&mapping->tree_lock);
399 * The non-racy check for a busy page.
401 * Must be careful with the order of the tests. When someone has
402 * a ref to the page, it may be possible that they dirty it then
403 * drop the reference. So if PageDirty is tested before page_count
404 * here, then the following race may occur:
406 * get_user_pages(&page);
407 * [user mapping goes away]
409 * write_to(page);			!PageDirty(page)   [good]
410 * SetPageDirty(page);
412 * put_page(page);			!page_count(page)  [good, discard it]
414 * [oops, our write_to data is lost]
416 * Reversing the order of the tests ensures such a situation cannot
417 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
418 * load is not satisfied before that of page->_count.
420 * Note that if SetPageDirty is always performed via set_page_dirty,
421 * and thus under tree_lock, then this ordering is not required.
423 if (unlikely(page_count(page) != 2))
426 if (unlikely(PageDirty(page)))
429 if (PageSwapCache(page)) {
430 swp_entry_t swap = { .val = page_private(page) };
431 __delete_from_swap_cache(page);
432 write_unlock_irq(&mapping->tree_lock);
434 __put_page(page); /* The pagecache ref */
438 __remove_from_page_cache(page);
439 write_unlock_irq(&mapping->tree_lock);
444 write_unlock_irq(&mapping->tree_lock);
449 * shrink_page_list() returns the number of reclaimed pages
451 static unsigned long shrink_page_list(struct list_head *page_list,
452 struct scan_control *sc,
453 enum pageout_io sync_writeback)
455 LIST_HEAD(ret_pages);
456 struct pagevec freed_pvec;
458 unsigned long nr_reclaimed = 0;
462 pagevec_init(&freed_pvec, 1);
463 while (!list_empty(page_list)) {
464 struct address_space *mapping;
471 page = lru_to_page(page_list);
472 list_del(&page->lru);
474 if (TestSetPageLocked(page))
477 VM_BUG_ON(PageActive(page));
481 if (!sc->may_swap && page_mapped(page))
484 /* Double the slab pressure for mapped and swapcache pages */
485 if (page_mapped(page) || PageSwapCache(page))
488 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
489 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
491 if (PageWriteback(page)) {
493 * Synchronous reclaim is performed in two passes,
494 * first an asynchronous pass over the list to
495 * start parallel writeback, and a second synchronous
496 * pass to wait for the IO to complete. Wait here
497 * for any page for which writeback has already started.
500 if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
501 wait_on_page_writeback(page);
506 referenced = page_referenced(page, 1, sc->mem_cgroup);
507 /* In active use or really unfreeable? Activate it. */
508 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
509 referenced && page_mapping_inuse(page))
510 goto activate_locked;
514 * Anonymous process memory has backing store?
515 * Try to allocate it some swap space here.
517 if (PageAnon(page) && !PageSwapCache(page))
518 if (!add_to_swap(page, GFP_ATOMIC))
519 goto activate_locked;
520 #endif /* CONFIG_SWAP */
522 mapping = page_mapping(page);
525 * The page is mapped into the page tables of one or more
526 * processes. Try to unmap it here.
528 if (page_mapped(page) && mapping) {
529 switch (try_to_unmap(page, 0)) {
531 goto activate_locked;
535 ; /* try to free the page below */
539 if (PageDirty(page)) {
540 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
544 if (!sc->may_writepage)
547 /* Page is dirty, try to write it out here */
548 switch (pageout(page, mapping, sync_writeback)) {
552 goto activate_locked;
554 if (PageWriteback(page) || PageDirty(page))
557 * A synchronous write - probably a ramdisk. Go
558 * ahead and try to reclaim the page.
560 if (TestSetPageLocked(page))
562 if (PageDirty(page) || PageWriteback(page))
564 mapping = page_mapping(page);
566 ; /* try to free the page below */
571 * If the page has buffers, try to free the buffer mappings
572 * associated with this page. If we succeed we try to free the page as well.
575 * We do this even if the page is PageDirty().
576 * try_to_release_page() does not perform I/O, but it is
577 * possible for a page to have PageDirty set, but it is actually
578 * clean (all its buffers are clean). This happens if the
579 * buffers were written out directly, with submit_bh(). ext3
580 * will do this, as well as the blockdev mapping.
581 * try_to_release_page() will discover that cleanness and will
582 * drop the buffers and mark the page clean - it can be freed.
584 * Rarely, pages can have buffers and no ->mapping. These are
585 * the pages which were not successfully invalidated in
586 * truncate_complete_page(). We try to drop those buffers here
587 * and if that worked, and the page is no longer mapped into
588 * process address space (page_count == 1) it can be freed.
589 * Otherwise, leave the page on the LRU so it is swappable.
591 if (PagePrivate(page)) {
592 if (!try_to_release_page(page, sc->gfp_mask))
593 goto activate_locked;
594 if (!mapping && page_count(page) == 1)
598 if (!mapping || !remove_mapping(mapping, page))
604 if (!pagevec_add(&freed_pvec, page))
605 __pagevec_release_nonlru(&freed_pvec);
614 list_add(&page->lru, &ret_pages);
615 VM_BUG_ON(PageLRU(page));
617 list_splice(&ret_pages, page_list);
618 if (pagevec_count(&freed_pvec))
619 __pagevec_release_nonlru(&freed_pvec);
620 count_vm_events(PGACTIVATE, pgactivate);
624 /* LRU Isolation modes. */
625 #define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
626 #define ISOLATE_ACTIVE 1 /* Isolate active pages. */
627 #define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */
630 * Attempt to remove the specified page from its LRU. Only take this page
631 * if it is of the appropriate PageActive status. Pages which are being
632 * freed elsewhere are also ignored.
634 * page: page to consider
635 * mode: one of the LRU isolation modes defined above
637 * returns 0 on success, -ve errno on failure.
639 int __isolate_lru_page(struct page *page, int mode)
643 /* Only take pages on the LRU. */
648 * When checking the active state, we need to be sure we are
649 * dealing with comparable boolean values. Take the logical not of each.
652 if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
656 if (likely(get_page_unless_zero(page))) {
658 * Be careful not to clear PageLRU until after we're
659 * sure the page is not being freed elsewhere -- the
660 * page release code relies on it.
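/*
 * Summary of the return contract as the callers below rely on it (added
 * note, inferred from the callers in this file): 0 means the page was
 * claimed -- a reference was taken and it may be pulled off the LRU;
 * -EBUSY means the refcount has already dropped to zero, so the page is
 * being freed elsewhere and must be left alone; -EINVAL means the page was
 * not a candidate for this @mode in the first place.
 */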
670 * zone->lru_lock is heavily contended. Some of the functions that
671 * shrink the lists perform better by taking out a batch of pages
672 * and working on them outside the LRU lock.
674 * For pagecache intensive workloads, this function is the hottest
675 * spot in the kernel (apart from copy_*_user functions).
677 * Appropriate locks must be held before calling this function.
679 * @nr_to_scan: The number of pages to look through on the list.
680 * @src: The LRU list to pull pages off.
681 * @dst: The temp list to put pages on to.
682 * @scanned: The number of pages that were scanned.
683 * @order: The caller's attempted allocation order
684 * @mode: One of the LRU isolation modes
686 * returns how many pages were moved onto *@dst.
688 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
689 struct list_head *src, struct list_head *dst,
690 unsigned long *scanned, int order, int mode)
692 unsigned long nr_taken = 0;
695 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
698 unsigned long end_pfn;
699 unsigned long page_pfn;
702 page = lru_to_page(src);
703 prefetchw_prev_lru_page(page, src, flags);
705 VM_BUG_ON(!PageLRU(page));
707 switch (__isolate_lru_page(page, mode)) {
709 list_move(&page->lru, dst);
714 /* else it is being freed elsewhere */
715 list_move(&page->lru, src);
726 * Attempt to take all pages in the order aligned region
727 * surrounding the tag page. Only take those pages of
728 * the same active state as that tag page. We may safely
729 * round the target page pfn down to the requested order
730 * as the mem_map is guaranteed valid out to MAX_ORDER;
731 * where a page in the block is in a different zone we will detect
732 * it from its zone id and abort this block scan.
734 zone_id = page_zone_id(page);
735 page_pfn = page_to_pfn(page);
736 pfn = page_pfn & ~((1 << order) - 1);
737 end_pfn = pfn + (1 << order);
738 for (; pfn < end_pfn; pfn++) {
739 struct page *cursor_page;
741 /* The target page is in the block, ignore it. */
742 if (unlikely(pfn == page_pfn))
745 /* Avoid holes within the zone. */
746 if (unlikely(!pfn_valid_within(pfn)))
749 cursor_page = pfn_to_page(pfn);
750 /* Check that we have not crossed a zone boundary. */
751 if (unlikely(page_zone_id(cursor_page) != zone_id))
753 switch (__isolate_lru_page(cursor_page, mode)) {
755 list_move(&cursor_page->lru, dst);
761 /* else it is being freed elsewhere */
762 list_move(&cursor_page->lru, src);
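/*
 * Worked example of the order-aligned block scan above (illustrative
 * numbers): for a tag page at pfn 0x12345 with order = 3,
 *
 *	pfn     = 0x12345 & ~((1 << 3) - 1) = 0x12340
 *	end_pfn = 0x12340 + (1 << 3)        = 0x12348
 *
 * so the eight pages 0x12340..0x12347 are considered; the tag page itself is
 * skipped and cursor pages in a different zone, in pfn holes, or failing
 * __isolate_lru_page() do not get taken.
 */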
773 static unsigned long isolate_pages_global(unsigned long nr,
774 struct list_head *dst,
775 unsigned long *scanned, int order,
776 int mode, struct zone *z,
777 struct mem_cgroup *mem_cont,
781 return isolate_lru_pages(nr, &z->active_list, dst,
782 scanned, order, mode);
784 return isolate_lru_pages(nr, &z->inactive_list, dst,
785 scanned, order, mode);
789 * clear_active_flags() is a helper for shrink_inactive_list(), clearing
790 * any active bits from the pages in the list.
792 static unsigned long clear_active_flags(struct list_head *page_list)
797 list_for_each_entry(page, page_list, lru)
798 if (PageActive(page)) {
799 ClearPageActive(page);
807 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number of reclaimed pages.
810 static unsigned long shrink_inactive_list(unsigned long max_scan,
811 struct zone *zone, struct scan_control *sc)
813 LIST_HEAD(page_list);
815 unsigned long nr_scanned = 0;
816 unsigned long nr_reclaimed = 0;
818 pagevec_init(&pvec, 1);
821 spin_lock_irq(&zone->lru_lock);
824 unsigned long nr_taken;
825 unsigned long nr_scan;
826 unsigned long nr_freed;
827 unsigned long nr_active;
829 nr_taken = sc->isolate_pages(sc->swap_cluster_max,
830 &page_list, &nr_scan, sc->order,
831 (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
832 ISOLATE_BOTH : ISOLATE_INACTIVE,
833 zone, sc->mem_cgroup, 0);
834 nr_active = clear_active_flags(&page_list);
835 __count_vm_events(PGDEACTIVATE, nr_active);
837 __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
838 __mod_zone_page_state(zone, NR_INACTIVE,
839 -(nr_taken - nr_active));
840 zone->pages_scanned += nr_scan;
841 spin_unlock_irq(&zone->lru_lock);
843 nr_scanned += nr_scan;
844 nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
847 * If we are direct reclaiming for contiguous pages and we do
848 * not reclaim everything in the list, try again and wait
849 * for IO to complete. This will stall high-order allocations
850 * but that should be acceptable to the caller
852 if (nr_freed < nr_taken && !current_is_kswapd() &&
853 sc->order > PAGE_ALLOC_COSTLY_ORDER) {
854 congestion_wait(WRITE, HZ/10);
857 * The attempt at page out may have made some
858 * of the pages active, mark them inactive again.
860 nr_active = clear_active_flags(&page_list);
861 count_vm_events(PGDEACTIVATE, nr_active);
863 nr_freed += shrink_page_list(&page_list, sc,
867 nr_reclaimed += nr_freed;
869 if (current_is_kswapd()) {
870 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
871 __count_vm_events(KSWAPD_STEAL, nr_freed);
873 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
874 __count_zone_vm_events(PGSTEAL, zone, nr_freed);
879 spin_lock(&zone->lru_lock);
881 * Put back any unfreeable pages.
883 while (!list_empty(&page_list)) {
884 page = lru_to_page(&page_list);
885 VM_BUG_ON(PageLRU(page));
887 list_del(&page->lru);
888 if (PageActive(page))
889 add_page_to_active_list(zone, page);
891 add_page_to_inactive_list(zone, page);
892 if (!pagevec_add(&pvec, page)) {
893 spin_unlock_irq(&zone->lru_lock);
894 __pagevec_release(&pvec);
895 spin_lock_irq(&zone->lru_lock);
898 } while (nr_scanned < max_scan);
899 spin_unlock(&zone->lru_lock);
902 pagevec_release(&pvec);
907 * We are about to scan this zone at a certain priority level. If that priority
908 * level is smaller (ie: more urgent) than the previous priority, then note
909 * that priority level within the zone. This is done so that when the next
910 * process comes in to scan this zone, it will immediately start out at this
911 * priority level rather than having to build up its own scanning priority.
912 * Here, this priority affects only the reclaim-mapped threshold.
914 static inline void note_zone_scanning_priority(struct zone *zone, int priority)
916 if (priority < zone->prev_priority)
917 zone->prev_priority = priority;
920 static inline int zone_is_near_oom(struct zone *zone)
922 return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
923 + zone_page_state(zone, NR_INACTIVE))*3;
927 * This moves pages from the active list to the inactive list.
929 * We move them the other way if the page is referenced by one or more
930 * processes, from rmap.
932 * If the pages are mostly unmapped, the processing is fast and it is
933 * appropriate to hold zone->lru_lock across the whole operation. But if
934 * the pages are mapped, the processing is slow (page_referenced()) so we
935 * should drop zone->lru_lock around each page. It's impossible to balance
936 * this, so instead we remove the pages from the LRU while processing them.
937 * It is safe to rely on PG_active against the non-LRU pages in here because
938 * nobody will play with that bit on a non-LRU page.
940 * The downside is that we have to touch page->_count against each page.
941 * But we had to alter page->flags anyway.
943 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
944 struct scan_control *sc, int priority)
946 unsigned long pgmoved;
947 int pgdeactivate = 0;
948 unsigned long pgscanned;
949 LIST_HEAD(l_hold); /* The pages which were snipped off */
950 LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */
951 LIST_HEAD(l_active); /* Pages to go onto the active_list */
954 int reclaim_mapped = 0;
962 if (zone_is_near_oom(zone))
963 goto force_reclaim_mapped;
966 * `distress' is a measure of how much trouble we're having
967 * reclaiming pages. 0 -> no problems. 100 -> great trouble.
969 distress = 100 >> min(zone->prev_priority, priority);
972 * The point of this algorithm is to decide when to start
973 * reclaiming mapped memory instead of just pagecache. Work out how much memory is mapped.
977 mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
978 global_page_state(NR_ANON_PAGES)) * 100) /
982 * Now decide how much we really want to unmap some pages. The
983 * mapped ratio is downgraded - just because there's a lot of
984 * mapped memory doesn't necessarily mean that page reclaim isn't succeeding.
987 * The distress ratio is important - we don't want to start going oom.
990 * A 100% value of vm_swappiness overrides this algorithm altogether.
993 swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
996 * If there's a huge imbalance between active and inactive
997 * (think active 100 times larger than inactive) we should
998 * become more permissive, or the system will take too much
999 * cpu before it starts swapping during memory pressure.
1000 * Distress is about avoiding early-oom; this is about
1001 * making swappiness graceful despite setting it to low values.
1004 * Avoid div by zero with nr_inactive+1, and max resulting
1005 * value is vm_total_pages.
1007 imbalance = zone_page_state(zone, NR_ACTIVE);
1008 imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
1011 * Reduce the effect of imbalance if swappiness is low;
1012 * this means that for a very low swappiness the imbalance
1013 * must be much higher than 100 for this logic to make the difference.
1016 * Max temporary value is vm_total_pages*100.
1018 imbalance *= (vm_swappiness + 1);
1022 * If not much of the ram is mapped, make the imbalance
1023 * less relevant; it is only a priority to refill the inactive
1024 * list with mapped pages when a high ratio of the pages are mapped.
1027 * Max temporary value is vm_total_pages*100.
1029 imbalance *= mapped_ratio;
1032 /* apply imbalance feedback to swap_tendency */
1033 swap_tendency += imbalance;
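/*
 * Worked example (illustrative numbers): with 40% of RAM mapped
 * (mapped_ratio = 40), the default swappiness of 60 and reclaim not yet
 * struggling (priority around 6, so distress = 100 >> 6 = 1), and ignoring
 * the imbalance term:
 *
 *	swap_tendency = 40/2 + 1 + 60 = 81	(< 100, leave mapped pages alone)
 *
 * Once priority has dropped to 0, distress = 100 and
 * swap_tendency = 20 + 100 + 60 = 180 >= 100, so mapped pages are
 * deactivated as well.
 */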
1036 * Now use this metric to decide whether to start moving mapped
1037 * memory onto the inactive list.
1039 if (swap_tendency >= 100)
1040 force_reclaim_mapped:
1045 spin_lock_irq(&zone->lru_lock);
1046 pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
1047 ISOLATE_ACTIVE, zone,
1049 zone->pages_scanned += pgscanned;
1050 __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
1051 spin_unlock_irq(&zone->lru_lock);
1053 while (!list_empty(&l_hold)) {
1055 page = lru_to_page(&l_hold);
1056 list_del(&page->lru);
1057 if (page_mapped(page)) {
1058 if (!reclaim_mapped ||
1059 (total_swap_pages == 0 && PageAnon(page)) ||
1060 page_referenced(page, 0, sc->mem_cgroup)) {
1061 list_add(&page->lru, &l_active);
1065 list_add(&page->lru, &l_inactive);
1068 pagevec_init(&pvec, 1);
1070 spin_lock_irq(&zone->lru_lock);
1071 while (!list_empty(&l_inactive)) {
1072 page = lru_to_page(&l_inactive);
1073 prefetchw_prev_lru_page(page, &l_inactive, flags);
1074 VM_BUG_ON(PageLRU(page));
1076 VM_BUG_ON(!PageActive(page));
1077 ClearPageActive(page);
1079 list_move(&page->lru, &zone->inactive_list);
1080 mem_cgroup_move_lists(page_get_page_cgroup(page), false);
1082 if (!pagevec_add(&pvec, page)) {
1083 __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
1084 spin_unlock_irq(&zone->lru_lock);
1085 pgdeactivate += pgmoved;
1087 if (buffer_heads_over_limit)
1088 pagevec_strip(&pvec);
1089 __pagevec_release(&pvec);
1090 spin_lock_irq(&zone->lru_lock);
1093 __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
1094 pgdeactivate += pgmoved;
1095 if (buffer_heads_over_limit) {
1096 spin_unlock_irq(&zone->lru_lock);
1097 pagevec_strip(&pvec);
1098 spin_lock_irq(&zone->lru_lock);
1102 while (!list_empty(&l_active)) {
1103 page = lru_to_page(&l_active);
1104 prefetchw_prev_lru_page(page, &l_active, flags);
1105 VM_BUG_ON(PageLRU(page));
1107 VM_BUG_ON(!PageActive(page));
1108 list_move(&page->lru, &zone->active_list);
1109 mem_cgroup_move_lists(page_get_page_cgroup(page), true);
1111 if (!pagevec_add(&pvec, page)) {
1112 __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
1114 spin_unlock_irq(&zone->lru_lock);
1115 __pagevec_release(&pvec);
1116 spin_lock_irq(&zone->lru_lock);
1119 __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
1121 __count_zone_vm_events(PGREFILL, zone, pgscanned);
1122 __count_vm_events(PGDEACTIVATE, pgdeactivate);
1123 spin_unlock_irq(&zone->lru_lock);
1125 pagevec_release(&pvec);
1129 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
1131 static unsigned long shrink_zone(int priority, struct zone *zone,
1132 struct scan_control *sc)
1134 unsigned long nr_active;
1135 unsigned long nr_inactive;
1136 unsigned long nr_to_scan;
1137 unsigned long nr_reclaimed = 0;
1140 * Add one to `nr_to_scan' just to make sure that the kernel will
1141 * slowly sift through the active list.
1143 zone->nr_scan_active +=
1144 (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
1145 nr_active = zone->nr_scan_active;
1146 if (nr_active >= sc->swap_cluster_max)
1147 zone->nr_scan_active = 0;
1151 zone->nr_scan_inactive +=
1152 (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
1153 nr_inactive = zone->nr_scan_inactive;
1154 if (nr_inactive >= sc->swap_cluster_max)
1155 zone->nr_scan_inactive = 0;
1159 while (nr_active || nr_inactive) {
1161 nr_to_scan = min(nr_active,
1162 (unsigned long)sc->swap_cluster_max);
1163 nr_active -= nr_to_scan;
1164 shrink_active_list(nr_to_scan, zone, sc, priority);
1168 nr_to_scan = min(nr_inactive,
1169 (unsigned long)sc->swap_cluster_max);
1170 nr_inactive -= nr_to_scan;
1171 nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
1176 throttle_vm_writeout(sc->gfp_mask);
1177 return nr_reclaimed;
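/*
 * Worked example of the nr_scan_active accumulation above (illustrative
 * numbers, with swap_cluster_max = SWAP_CLUSTER_MAX = 32 and
 * priority = DEF_PRIORITY = 12): a zone with 1,000,000 active pages adds
 * (1000000 >> 12) + 1 = 245 per call and is scanned immediately, while a
 * zone with only 100 active pages adds (100 >> 12) + 1 = 1 per call and is
 * only scanned once the counter has crept up to 32 -- the "+ 1" is what
 * keeps such small zones slowly sifting rather than never being scanned.
 */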
1181 * This is the direct reclaim path, for page-allocating processes. We only
1182 * try to reclaim pages from zones which will satisfy the caller's allocation
1185 * We reclaim from a zone even if that zone is over pages_high. Because:
1186 * a) The caller may be trying to free *extra* pages to satisfy a higher-order allocation, or
1188 * b) The zones may be over pages_high but they must go *over* pages_high to
1189 * satisfy the `incremental min' zone defense algorithm.
1191 * Returns the number of reclaimed pages.
1193 * If a zone is deemed to be full of pinned pages then just give it a light
1194 * scan then give up on it.
1196 static unsigned long shrink_zones(int priority, struct zone **zones,
1197 struct scan_control *sc)
1199 unsigned long nr_reclaimed = 0;
1202 sc->all_unreclaimable = 1;
1203 for (i = 0; zones[i] != NULL; i++) {
1204 struct zone *zone = zones[i];
1206 if (!populated_zone(zone))
1209 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1212 note_zone_scanning_priority(zone, priority);
1214 if (zone_is_all_unreclaimable(zone) && priority != DEF_PRIORITY)
1215 continue; /* Let kswapd poll it */
1217 sc->all_unreclaimable = 0;
1219 nr_reclaimed += shrink_zone(priority, zone, sc);
1221 return nr_reclaimed;
1225 * This is the main entry point to direct page reclaim.
1227 * If a full scan of the inactive list fails to free enough memory then we
1228 * are "out of memory" and something needs to be killed.
1230 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1231 * high - the zone may be full of dirty or under-writeback pages, which this
1232 * caller can't do much about. We kick pdflush and take explicit naps in the
1233 * hope that some of these pages can be written. But if the allocating task
1234 * holds filesystem locks which prevent writeout this might not work, and the
1235 * allocation attempt will fail.
1237 static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
1238 struct scan_control *sc)
1242 unsigned long total_scanned = 0;
1243 unsigned long nr_reclaimed = 0;
1244 struct reclaim_state *reclaim_state = current->reclaim_state;
1245 unsigned long lru_pages = 0;
1248 count_vm_event(ALLOCSTALL);
1250 for (i = 0; zones[i] != NULL; i++) {
1251 struct zone *zone = zones[i];
1253 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1256 lru_pages += zone_page_state(zone, NR_ACTIVE)
1257 + zone_page_state(zone, NR_INACTIVE);
1260 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1263 disable_swap_token();
1264 nr_reclaimed += shrink_zones(priority, zones, sc);
1266 * Don't shrink slabs when reclaiming memory from
1267 * over limit cgroups
1269 if (sc->mem_cgroup == NULL)
1270 shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
1271 if (reclaim_state) {
1272 nr_reclaimed += reclaim_state->reclaimed_slab;
1273 reclaim_state->reclaimed_slab = 0;
1275 total_scanned += sc->nr_scanned;
1276 if (nr_reclaimed >= sc->swap_cluster_max) {
1282 * Try to write back as many pages as we just scanned. This
1283 * tends to cause slow streaming writers to write data to the
1284 * disk smoothly, at the dirtying rate, which is nice. But
1285 * that's undesirable in laptop mode, where we *want* lumpy
1286 * writeout. So in laptop mode, write out the whole world.
1288 if (total_scanned > sc->swap_cluster_max +
1289 sc->swap_cluster_max / 2) {
1290 wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1291 sc->may_writepage = 1;
1294 /* Take a nap, wait for some writeback to complete */
1295 if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
1296 congestion_wait(WRITE, HZ/10);
1298 /* top priority shrink_zones still had more to do? don't OOM, then */
1299 if (!sc->all_unreclaimable && sc->mem_cgroup == NULL)
1303 * Now that we've scanned all the zones at this priority level, note
1304 * that level within the zone so that the next thread which performs
1305 * scanning of this zone will immediately start out at this priority
1306 * level. This affects only the decision whether or not to bring
1307 * mapped pages onto the inactive list.
1311 for (i = 0; zones[i] != NULL; i++) {
1312 struct zone *zone = zones[i];
1314 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1317 zone->prev_priority = priority;
1322 unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
1324 struct scan_control sc = {
1325 .gfp_mask = gfp_mask,
1326 .may_writepage = !laptop_mode,
1327 .swap_cluster_max = SWAP_CLUSTER_MAX,
1329 .swappiness = vm_swappiness,
1332 .isolate_pages = isolate_pages_global,
1335 return do_try_to_free_pages(zones, gfp_mask, &sc);
1338 #ifdef CONFIG_CGROUP_MEM_CONT
1340 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1343 struct scan_control sc = {
1344 .gfp_mask = gfp_mask,
1345 .may_writepage = !laptop_mode,
1347 .swap_cluster_max = SWAP_CLUSTER_MAX,
1348 .swappiness = vm_swappiness,
1350 .mem_cgroup = mem_cont,
1351 .isolate_pages = mem_cgroup_isolate_pages,
1354 struct zone **zones;
1355 int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
1357 for_each_online_node(node) {
1358 zones = NODE_DATA(node)->node_zonelists[target_zone].zones;
1359 if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
1367 * For kswapd, balance_pgdat() will work across all this node's zones until
1368 * they are all at pages_high.
1370 * Returns the number of pages which were actually freed.
1372 * There is special handling here for zones which are full of pinned pages.
1373 * This can happen if the pages are all mlocked, or if they are all used by
1374 * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
1375 * What we do is to detect the case where all pages in the zone have been
1376 * scanned twice and there has been zero successful reclaim. Mark the zone as
1377 * dead and from now on, only perform a short scan. Basically we're polling
1378 * the zone for when the problem goes away.
1380 * kswapd scans the zones in the highmem->normal->dma direction. It skips
1381 * zones which have free_pages > pages_high, but once a zone is found to have
1382 * free_pages <= pages_high, we scan that zone and the lower zones regardless
1383 * of the number of free pages in the lower zones. This interoperates with
1384 * the page allocator fallback scheme to ensure that aging of pages is balanced across the zones.
1387 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1392 unsigned long total_scanned;
1393 unsigned long nr_reclaimed;
1394 struct reclaim_state *reclaim_state = current->reclaim_state;
1395 struct scan_control sc = {
1396 .gfp_mask = GFP_KERNEL,
1398 .swap_cluster_max = SWAP_CLUSTER_MAX,
1399 .swappiness = vm_swappiness,
1402 .isolate_pages = isolate_pages_global,
1405 * temp_priority is used to remember the scanning priority at which
1406 * this zone was successfully refilled to free_pages == pages_high.
1408 int temp_priority[MAX_NR_ZONES];
1413 sc.may_writepage = !laptop_mode;
1414 count_vm_event(PAGEOUTRUN);
1416 for (i = 0; i < pgdat->nr_zones; i++)
1417 temp_priority[i] = DEF_PRIORITY;
1419 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1420 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
1421 unsigned long lru_pages = 0;
1423 /* The swap token gets in the way of swapout... */
1425 disable_swap_token();
1430 * Scan in the highmem->dma direction for the highest
1431 * zone which needs scanning
1433 for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1434 struct zone *zone = pgdat->node_zones + i;
1436 if (!populated_zone(zone))
1439 if (zone_is_all_unreclaimable(zone) &&
1440 priority != DEF_PRIORITY)
1443 if (!zone_watermark_ok(zone, order, zone->pages_high,
1452 for (i = 0; i <= end_zone; i++) {
1453 struct zone *zone = pgdat->node_zones + i;
1455 lru_pages += zone_page_state(zone, NR_ACTIVE)
1456 + zone_page_state(zone, NR_INACTIVE);
1460 * Now scan the zone in the dma->highmem direction, stopping
1461 * at the last zone which needs scanning.
1463 * We do this because the page allocator works in the opposite
1464 * direction. This prevents the page allocator from allocating
1465 * pages behind kswapd's direction of progress, which would
1466 * cause too much scanning of the lower zones.
1468 for (i = 0; i <= end_zone; i++) {
1469 struct zone *zone = pgdat->node_zones + i;
1472 if (!populated_zone(zone))
1475 if (zone_is_all_unreclaimable(zone) &&
1476 priority != DEF_PRIORITY)
1479 if (!zone_watermark_ok(zone, order, zone->pages_high,
1482 temp_priority[i] = priority;
1484 note_zone_scanning_priority(zone, priority);
1486 * We put equal pressure on every zone, unless one
1487 * zone has way too many pages free already.
1489 if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
1491 nr_reclaimed += shrink_zone(priority, zone, &sc);
1492 reclaim_state->reclaimed_slab = 0;
1493 nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1495 nr_reclaimed += reclaim_state->reclaimed_slab;
1496 total_scanned += sc.nr_scanned;
1497 if (zone_is_all_unreclaimable(zone))
1499 if (nr_slab == 0 && zone->pages_scanned >=
1500 (zone_page_state(zone, NR_ACTIVE)
1501 + zone_page_state(zone, NR_INACTIVE)) * 6)
1503 ZONE_ALL_UNRECLAIMABLE);
1505 * If we've done a decent amount of scanning and
1506 * the reclaim ratio is low, start doing writepage
1507 * even in laptop mode
1509 if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
1510 total_scanned > nr_reclaimed + nr_reclaimed / 2)
1511 sc.may_writepage = 1;
1514 break; /* kswapd: all done */
1516 * OK, kswapd is getting into trouble. Take a nap, then take
1517 * another pass across the zones.
1519 if (total_scanned && priority < DEF_PRIORITY - 2)
1520 congestion_wait(WRITE, HZ/10);
1523 * We do this so kswapd doesn't build up large priorities for
1524 * example when it is freeing in parallel with allocators. It
1525 * matches the direct reclaim path behaviour in terms of impact
1526 * on zone->*_priority.
1528 if (nr_reclaimed >= SWAP_CLUSTER_MAX)
1533 * Note within each zone the priority level at which this zone was
1534 * brought into a happy state. So that the next thread which scans this
1535 * zone will start out at that priority level.
1537 for (i = 0; i < pgdat->nr_zones; i++) {
1538 struct zone *zone = pgdat->node_zones + i;
1540 zone->prev_priority = temp_priority[i];
1542 if (!all_zones_ok) {
1550 return nr_reclaimed;
1554 * The background pageout daemon, started as a kernel thread
1555 * from the init process.
1557 * This basically trickles out pages so that we have _some_
1558 * free memory available even if there is no other activity
1559 * that frees anything up. This is needed for things like routing
1560 * etc, where we otherwise might have all activity going on in
1561 * asynchronous contexts that cannot page things out.
1563 * If there are applications that are active memory-allocators
1564 * (most normal use), this basically shouldn't matter.
1566 static int kswapd(void *p)
1568 unsigned long order;
1569 pg_data_t *pgdat = (pg_data_t*)p;
1570 struct task_struct *tsk = current;
1572 struct reclaim_state reclaim_state = {
1573 .reclaimed_slab = 0,
1577 cpumask = node_to_cpumask(pgdat->node_id);
1578 if (!cpus_empty(cpumask))
1579 set_cpus_allowed(tsk, cpumask);
1580 current->reclaim_state = &reclaim_state;
1583 * Tell the memory management that we're a "memory allocator",
1584 * and that if we need more memory we should get access to it
1585 * regardless (see "__alloc_pages()"). "kswapd" should
1586 * never get caught in the normal page freeing logic.
1588 * (Kswapd normally doesn't need memory anyway, but sometimes
1589 * you need a small amount of memory in order to be able to
1590 * page out something else, and this flag essentially protects
1591 * us from recursively trying to free more memory as we're
1592 * trying to free the first piece of memory in the first place).
1594 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1599 unsigned long new_order;
1601 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1602 new_order = pgdat->kswapd_max_order;
1603 pgdat->kswapd_max_order = 0;
1604 if (order < new_order) {
1606 * Don't sleep if someone wants a larger 'order' allocation
1611 if (!freezing(current))
1614 order = pgdat->kswapd_max_order;
1616 finish_wait(&pgdat->kswapd_wait, &wait);
1618 if (!try_to_freeze()) {
1619 /* We can speed up thawing tasks if we don't call
1620 * balance_pgdat after returning from the refrigerator
1622 balance_pgdat(pgdat, order);
1629 * A zone is low on free memory, so wake its kswapd task to service it.
1631 void wakeup_kswapd(struct zone *zone, int order)
1635 if (!populated_zone(zone))
1638 pgdat = zone->zone_pgdat;
1639 if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
1641 if (pgdat->kswapd_max_order < order)
1642 pgdat->kswapd_max_order = order;
1643 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1645 if (!waitqueue_active(&pgdat->kswapd_wait))
1647 wake_up_interruptible(&pgdat->kswapd_wait);
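/*
 * Illustrative sketch, not part of this file: the allocator-side counterpart
 * of wakeup_kswapd(), roughly what the page allocator does when its fast-path
 * watermark check fails.  Shown only to make the handshake with kswapd
 * concrete; the zones array is assumed to come from the allocation context.
 */
static void wake_all_kswapds_sketch(unsigned int order, struct zone **zones)
{
	int i;

	for (i = 0; zones[i] != NULL; i++)
		wakeup_kswapd(zones[i], order);
}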
1652 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
1653 * from LRU lists system-wide, for given pass and priority, and returns the
1654 * number of reclaimed pages
1656 * For pass > 3 we also try to shrink the LRU lists that contain a few pages
1658 static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
1659 int pass, struct scan_control *sc)
1662 unsigned long nr_to_scan, ret = 0;
1664 for_each_zone(zone) {
1666 if (!populated_zone(zone))
1669 if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
1672 /* For pass = 0 we don't shrink the active list */
1674 zone->nr_scan_active +=
1675 (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
1676 if (zone->nr_scan_active >= nr_pages || pass > 3) {
1677 zone->nr_scan_active = 0;
1678 nr_to_scan = min(nr_pages,
1679 zone_page_state(zone, NR_ACTIVE));
1680 shrink_active_list(nr_to_scan, zone, sc, prio);
1684 zone->nr_scan_inactive +=
1685 (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
1686 if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
1687 zone->nr_scan_inactive = 0;
1688 nr_to_scan = min(nr_pages,
1689 zone_page_state(zone, NR_INACTIVE));
1690 ret += shrink_inactive_list(nr_to_scan, zone, sc);
1691 if (ret >= nr_pages)
1699 static unsigned long count_lru_pages(void)
1701 return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
1705 * Try to free `nr_pages' of memory, system-wide, and return the number of freed pages.
1708 * Rather than trying to age LRUs the aim is to preserve the overall
1709 * LRU order by reclaiming preferentially
1710 * inactive > active > active referenced > active mapped
1712 unsigned long shrink_all_memory(unsigned long nr_pages)
1714 unsigned long lru_pages, nr_slab;
1715 unsigned long ret = 0;
1717 struct reclaim_state reclaim_state;
1718 struct scan_control sc = {
1719 .gfp_mask = GFP_KERNEL,
1721 .swap_cluster_max = nr_pages,
1723 .swappiness = vm_swappiness,
1724 .isolate_pages = isolate_pages_global,
1727 current->reclaim_state = &reclaim_state;
1729 lru_pages = count_lru_pages();
1730 nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
1731 /* If slab caches are huge, it's better to hit them first */
1732 while (nr_slab >= lru_pages) {
1733 reclaim_state.reclaimed_slab = 0;
1734 shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
1735 if (!reclaim_state.reclaimed_slab)
1738 ret += reclaim_state.reclaimed_slab;
1739 if (ret >= nr_pages)
1742 nr_slab -= reclaim_state.reclaimed_slab;
1746 * We try to shrink LRUs in 5 passes:
1747 * 0 = Reclaim from inactive_list only
1748 * 1 = Reclaim from active list but don't reclaim mapped
1749 * 2 = 2nd pass of type 1
1750 * 3 = Reclaim mapped (normal reclaim)
1751 * 4 = 2nd pass of type 3
1753 for (pass = 0; pass < 5; pass++) {
1756 /* Force reclaiming of mapped pages in passes #3 and #4 */
1759 sc.swappiness = 100;
1762 for (prio = DEF_PRIORITY; prio >= 0; prio--) {
1763 unsigned long nr_to_scan = nr_pages - ret;
1766 ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
1767 if (ret >= nr_pages)
1770 reclaim_state.reclaimed_slab = 0;
1771 shrink_slab(sc.nr_scanned, sc.gfp_mask,
1773 ret += reclaim_state.reclaimed_slab;
1774 if (ret >= nr_pages)
1777 if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
1778 congestion_wait(WRITE, HZ / 10);
1783 * If ret = 0, we could not shrink LRUs, but there may be something in slab caches.
1788 reclaim_state.reclaimed_slab = 0;
1789 shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
1790 ret += reclaim_state.reclaimed_slab;
1791 } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
1795 current->reclaim_state = NULL;
1801 /* It's optimal to keep kswapds on the same CPUs as their memory, but
1802 not required for correctness. So if the last cpu in a node goes
1803 away, we get changed to run anywhere: as the first one comes back,
1804 restore their cpu bindings. */
1805 static int __devinit cpu_callback(struct notifier_block *nfb,
1806 unsigned long action, void *hcpu)
1812 if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
1813 for_each_node_state(nid, N_HIGH_MEMORY) {
1814 pgdat = NODE_DATA(nid);
1815 mask = node_to_cpumask(pgdat->node_id);
1816 if (any_online_cpu(mask) != NR_CPUS)
1817 /* One of our CPUs online: restore mask */
1818 set_cpus_allowed(pgdat->kswapd, mask);
1825 * This kswapd start function will be called by init and node-hot-add.
1826 * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
1828 int kswapd_run(int nid)
1830 pg_data_t *pgdat = NODE_DATA(nid);
1836 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
1837 if (IS_ERR(pgdat->kswapd)) {
1838 /* failure at boot is fatal */
1839 BUG_ON(system_state == SYSTEM_BOOTING);
1840 printk("Failed to start kswapd on node %d\n", nid);
1846 static int __init kswapd_init(void)
1851 for_each_node_state(nid, N_HIGH_MEMORY)
1853 hotcpu_notifier(cpu_callback, 0);
1857 module_init(kswapd_init)
1863 * If non-zero call zone_reclaim when the number of free pages falls below the watermarks.
1866 int zone_reclaim_mode __read_mostly;
1868 #define RECLAIM_OFF 0
1869 #define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */
1870 #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
1871 #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */
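/*
 * Example settings for the vm.zone_reclaim_mode sysctl built from the bits
 * above (illustrative): 1 enables RECLAIM_ZONE only (reclaim clean pagecache
 * in the zone), 3 adds RECLAIM_WRITE (also write out dirty pages), and 7
 * adds RECLAIM_SWAP (also swap out anonymous pages).
 */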
1874 * Priority for ZONE_RECLAIM. This determines the fraction of pages
1875 * of a node considered for each zone_reclaim. 4 scans 1/16th of a zone.
1878 #define ZONE_RECLAIM_PRIORITY 4
1881 * Percentage of pages in a zone that must be unmapped for zone_reclaim to occur.
1884 int sysctl_min_unmapped_ratio = 1;
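/*
 * Worked example (assuming min_unmapped_pages is derived elsewhere as
 * present_pages * sysctl_min_unmapped_ratio / 100): with the default ratio
 * of 1, a zone of 1,000,000 pages needs more than 10,000 unmapped
 * file-backed pages (NR_FILE_PAGES - NR_FILE_MAPPED) before zone_reclaim()
 * below will reclaim from it, unless reclaimable slab is over its own limit.
 */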
1887 * If the number of slab pages in a zone grows beyond this percentage then
1888 * slab reclaim needs to occur.
1890 int sysctl_min_slab_ratio = 5;
1893 * Try to free up some pages from this zone through reclaim.
1895 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1897 /* Minimum pages needed in order to stay on node */
1898 const unsigned long nr_pages = 1 << order;
1899 struct task_struct *p = current;
1900 struct reclaim_state reclaim_state;
1902 unsigned long nr_reclaimed = 0;
1903 struct scan_control sc = {
1904 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
1905 .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
1906 .swap_cluster_max = max_t(unsigned long, nr_pages,
1908 .gfp_mask = gfp_mask,
1909 .swappiness = vm_swappiness,
1910 .isolate_pages = isolate_pages_global,
1912 unsigned long slab_reclaimable;
1914 disable_swap_token();
1917 * We need to be able to allocate from the reserves for RECLAIM_SWAP
1918 * and we also need to be able to write out pages for RECLAIM_WRITE and RECLAIM_SWAP.
1921 p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
1922 reclaim_state.reclaimed_slab = 0;
1923 p->reclaim_state = &reclaim_state;
1925 if (zone_page_state(zone, NR_FILE_PAGES) -
1926 zone_page_state(zone, NR_FILE_MAPPED) >
1927 zone->min_unmapped_pages) {
1929 * Free memory by calling shrink zone with increasing
1930 * priorities until we have enough memory freed.
1932 priority = ZONE_RECLAIM_PRIORITY;
1934 note_zone_scanning_priority(zone, priority);
1935 nr_reclaimed += shrink_zone(priority, zone, &sc);
1937 } while (priority >= 0 && nr_reclaimed < nr_pages);
1940 slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
1941 if (slab_reclaimable > zone->min_slab_pages) {
1943 * shrink_slab() does not currently allow us to determine how
1944 * many pages were freed in this zone. So we take the current
1945 * number of slab pages and shake the slab until it is reduced
1946 * by the same nr_pages that we used for reclaiming unmapped pages.
1949 * Note that shrink_slab will free memory on all zones and may take a long time.
1952 while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
1953 zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
1954 slab_reclaimable - nr_pages)
1958 * Update nr_reclaimed by the number of slab pages we
1959 * reclaimed from this zone.
1961 nr_reclaimed += slab_reclaimable -
1962 zone_page_state(zone, NR_SLAB_RECLAIMABLE);
1965 p->reclaim_state = NULL;
1966 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
1967 return nr_reclaimed >= nr_pages;
1970 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1976 * Zone reclaim reclaims unmapped file backed pages and
1977 * slab pages if we are over the defined limits.
1979 * A small portion of unmapped file backed pages is needed for
1980 * file I/O otherwise pages read by file I/O will be immediately
1981 * thrown out if the zone is overallocated. So we do not reclaim
1982 * if less than a specified percentage of the zone is used by
1983 * unmapped file backed pages.
1985 if (zone_page_state(zone, NR_FILE_PAGES) -
1986 zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
1987 && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
1988 <= zone->min_slab_pages)
1991 if (zone_is_all_unreclaimable(zone))
1995 * Do not scan if the allocation should not be delayed.
1997 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2001 * Only run zone reclaim on the local zone or on zones that do not
2002 * have associated processors. This will favor the local processor
2003 * over remote processors and spread off node memory allocations
2004 * as wide as possible.
2006 node_id = zone_to_nid(zone);
2007 if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2010 if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2012 ret = __zone_reclaim(zone, gfp_mask, order);
2013 zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);