/*
 * Memory Migration functionality - linux/mm/migration.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter <clameter@sgi.com>
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					   buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/swapops.h>

/* The maximum number of pages to take off the LRU for migration */
#define MIGRATE_CHUNK_SIZE 256

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * Isolate one page from the LRU lists. If successful put it onto
 * the indicated list with elevated page count.
 *
 * Result:
 *  -EBUSY: page not on LRU list
 *  0: page removed from LRU list and added to the specified list.
 */
int isolate_lru_page(struct page *page, struct list_head *pagelist)
{
	int ret = -EBUSY;

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);

		spin_lock_irq(&zone->lru_lock);
		if (PageLRU(page)) {
			ret = 0;
			get_page(page);
			ClearPageLRU(page);
			if (PageActive(page))
				del_page_from_active_list(zone, page);
			else
				del_page_from_inactive_list(zone, page);
			list_add_tail(&page->lru, pagelist);
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}

/*
 * migrate_prep() needs to be called after we have compiled the list of pages
 * to be migrated using isolate_lru_page() but before we begin a series of calls
 * to migrate_pages().
 */
int migrate_prep(void)
{
	/* Must have swap device for migration */
	if (nr_swap_pages <= 0)
		return -ENODEV;

	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}
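
/*
 * Illustrative call sequence (a sketch only, not part of the original
 * file; the list names below are placeholders):
 *
 *	LIST_HEAD(pagelist);
 *	LIST_HEAD(moved);
 *	LIST_HEAD(failed);
 *
 *	... isolate candidate pages with isolate_lru_page(page, &pagelist) ...
 *	migrate_prep();
 *	migrate_pages(&pagelist, NULL, &moved, &failed);
 *	putback_lru_pages(&moved);
 *
 * Passing NULL as the target list makes migrate_pages() swap the pages
 * out. Pages left on "pagelist" or "failed" are still off the LRU and
 * must eventually be put back by the caller as well.
 */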

static inline void move_to_lru(struct page *page)
{
	list_del(&page->lru);
	if (PageActive(page)) {
		/*
		 * lru_cache_add_active checks that
		 * the PG_active bit is off.
		 */
		ClearPageActive(page);
		lru_cache_add_active(page);
	} else
		lru_cache_add(page);
	put_page(page);
}

/*
 * Add isolated pages on the list back to the LRU.
 *
 * returns the number of pages put back.
 */
int putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;
	int count = 0;

	list_for_each_entry_safe(page, page2, l, lru) {
		move_to_lru(page);
		count++;
	}
	return count;
}

/*
 * Non migratable page.
 */
int fail_migrate_page(struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * swapout a single page
 * page is locked upon entry, unlocked on exit
 */
static int swap_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (page_mapped(page) && mapping)
		if (try_to_unmap(page, 1) != SWAP_SUCCESS)
			goto unlock_retry;

	if (PageDirty(page)) {
		/* Page is dirty, try to write it out here */
		switch(pageout(page, mapping)) {
		case PAGE_KEEP:
		case PAGE_ACTIVATE:
			goto unlock_retry;

		case PAGE_SUCCESS:
			goto retry;

		case PAGE_CLEAN:
			; /* try to free the page below */
		}
	}

	if (PagePrivate(page)) {
		if (!try_to_release_page(page, GFP_KERNEL) ||
		    (!mapping && page_count(page) == 1))
			goto unlock_retry;
	}

	if (remove_mapping(mapping, page)) {
		/* Success */
		unlock_page(page);
		return 0;
	}

unlock_retry:
	unlock_page(page);

retry:
	return -EAGAIN;
}
EXPORT_SYMBOL(swap_page);
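
/*
 * Locking contract sketch (illustrative only, not part of the original
 * file): the caller takes the page lock before calling swap_page() and
 * the page has been unlocked again by the time swap_page() returns.
 *
 *	if (!TestSetPageLocked(page)) {
 *		rc = swap_page(page);
 *		if (rc)
 *			... the page could not be freed yet, try again later ...
 *	}
 */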

/*
 * Remove references for a page and establish the new page with the correct
 * basic settings to be able to stop accesses to the page.
 */
int migrate_page_remove_references(struct page *newpage,
				struct page *page, int nr_refs)
{
	struct address_space *mapping = page_mapping(page);
	struct page **radix_pointer;

	/*
	 * Avoid doing any of the following work if the page count
	 * indicates that the page is in use or truncate has removed
	 * the page.
	 */
	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
		return -EAGAIN;

	/*
	 * Establish swap ptes for anonymous pages or destroy pte
	 * maps for files.
	 *
	 * In order to reestablish file backed mappings the fault handlers
	 * will take the radix tree_lock which may then be used to stop
	 * processes from accessing this page until the new page is ready.
	 *
	 * A process accessing via a swap pte (an anonymous page) will take a
	 * page_lock on the old page which will block the process until the
	 * migration attempt is complete. At that time the PageSwapCache bit
	 * will be examined. If the page was migrated then the PageSwapCache
	 * bit will be clear and the operation to retrieve the page will be
	 * retried which will find the new page in the radix tree. Then a new
	 * direct mapping may be generated based on the radix tree contents.
	 *
	 * If the page was not migrated then the PageSwapCache bit
	 * is still set and the operation may continue.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		/* A vma has VM_LOCKED set -> permanent failure */
		return -EPERM;

	/*
	 * Give up if we were unable to remove all mappings.
	 */
	if (page_mapcount(page))
		return -EAGAIN;

	write_lock_irq(&mapping->tree_lock);

	radix_pointer = (struct page **)radix_tree_lookup_slot(
						&mapping->page_tree,
						page_index(page));

	if (!page_mapping(page) || page_count(page) != nr_refs ||
			*radix_pointer != page) {
		write_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 *
	 * Certain minimal information about a page must be available
	 * in order for other subsystems to properly handle the page if they
	 * find it through the radix tree update before we are finished
	 * copying the page.
	 */
	get_page(newpage);
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	*radix_pointer = newpage;
	__put_page(page);
	write_unlock_irq(&mapping->tree_lock);

	return 0;
}
EXPORT_SYMBOL(migrate_page_remove_references);
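
/*
 * Reference count accounting sketch (illustrative only, not part of the
 * original file): nr_refs is the number of references the caller expects
 * the old page to hold once all mappings are gone. migrate_page() below
 * passes 2 (the isolation reference plus the radix tree / swap cache
 * reference) and buffer_migrate_page() passes 3 (one more for the
 * buffer_head private data). Any other count means somebody else is
 * still using the page and the caller has to retry.
 */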

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		set_page_dirty(newpage);
	}

	ClearPageSwapCache(page);
	ClearPageActive(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_remove_references(newpage, page, 2);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);

	/*
	 * Remove auxiliary swap entries and replace
	 * them with real ptes.
	 *
	 * Note that a real pte entry will allow processes that are not
	 * waiting on the page lock to use the new page via the page tables
	 * before the new page is unlocked.
	 */
	remove_from_swap(newpage);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
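
/*
 * Illustrative wiring (sketch only, not part of the original file): a
 * filesystem whose pages carry no private data can point its
 * address_space_operations at migrate_page() directly. The aops structure
 * and the readpage/writepage names below are hypothetical.
 *
 *	static struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */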

/*
 * migrate_pages
 *
 * Two lists are passed to this function. The first list
 * contains the pages isolated from the LRU to be migrated.
 * The second list contains new pages that the pages isolated
 * can be moved to. If the second list is NULL then all
 * pages are swapped out.
 *
 * The function returns after 10 attempts or if no pages
 * are movable anymore because "to" has become empty
 * or because no retryable pages exist anymore.
 *
 * Return: Number of pages not migrated when "to" ran empty.
 */
int migrate_pages(struct list_head *from, struct list_head *to,
		  struct list_head *moved, struct list_head *failed)
{
	int retry;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

redo:
	retry = 0;

	list_for_each_entry_safe(page, page2, from, lru) {
		struct page *newpage = NULL;
		struct address_space *mapping;

		cond_resched();

		rc = 0;
		if (page_count(page) == 1)
			/* page was freed from under us. So we are done. */
			goto next;

		if (to && list_empty(to))
			break;

		/*
		 * Skip locked pages during the first two passes to give the
		 * functions holding the lock time to release the page. Later we
		 * use lock_page() to have a higher chance of acquiring the
		 * lock.
		 */
		rc = -EAGAIN;
		if (pass > 2)
			lock_page(page);
		else
			if (TestSetPageLocked(page))
				goto next;

		/*
		 * Only wait on writeback if we have already done a pass where
		 * we may have triggered writeouts for lots of pages.
		 */
		if (pass > 0)
			wait_on_page_writeback(page);
		else
			if (PageWriteback(page))
				goto unlock_page;

		/*
		 * Anonymous pages must have swap cache references otherwise
		 * the information contained in the page maps cannot be
		 * preserved.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!add_to_swap(page, GFP_KERNEL)) {
				rc = -ENOMEM;
				goto unlock_page;
			}
		}

		if (!to) {
			rc = swap_page(page);
			goto next;
		}

		newpage = lru_to_page(to);
		lock_page(newpage);

		/*
		 * Pages are properly locked and writeback is complete.
		 * Try to migrate the page.
		 */
		mapping = page_mapping(page);
		if (!mapping)
			goto unlock_both;

		if (mapping->a_ops->migratepage) {
			/*
			 * Most pages have a mapping and most filesystems
			 * should provide a migration function. Anonymous
			 * pages are part of swap space which also has its
			 * own migration function. This is the most common
			 * path for page migration.
			 */
			rc = mapping->a_ops->migratepage(newpage, page);
			goto unlock_both;
		}

		/*
		 * Default handling if a filesystem does not provide
		 * a migration function. We can only migrate clean
		 * pages so try to write out any dirty pages first.
		 */
		if (PageDirty(page)) {
			switch (pageout(page, mapping)) {
			case PAGE_KEEP:
			case PAGE_ACTIVATE:
				goto unlock_both;

			case PAGE_SUCCESS:
				unlock_page(newpage);
				goto next;

			case PAGE_CLEAN:
				; /* try to migrate the page below */
			}
		}

		/*
		 * Buffers are managed in a filesystem specific way.
		 * We must have no buffers or drop them.
		 */
		if (!page_has_buffers(page) ||
		    try_to_release_page(page, GFP_KERNEL)) {
			rc = migrate_page(newpage, page);
			goto unlock_both;
		}

		/*
		 * On early passes with mapped pages simply
		 * retry. There may be a lock held for some
		 * buffers that may go away. Later
		 * swap them out.
		 */
		if (pass > 4) {
			/*
			 * Persistently unable to drop buffers..... As a
			 * measure of last resort we fall back to
			 * swap_page().
			 */
			unlock_page(newpage);
			newpage = NULL;
			rc = swap_page(page);
			goto next;
		}

unlock_both:
		unlock_page(newpage);

unlock_page:
		unlock_page(page);

next:
		if (rc == -EAGAIN) {
			retry++;
		} else if (rc) {
			/* Permanent failure */
			list_move(&page->lru, failed);
			nr_failed++;
		} else {
			if (newpage) {
				/* Successful migration. Return page to LRU */
				move_to_lru(newpage);
			}
			list_move(&page->lru, moved);
		}
	}
	if (retry && pass++ < 10)
		goto redo;

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return nr_failed + retry;
}
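
/*
 * Interpreting the result (illustrative sketch, not part of the original
 * file): on return, "moved" holds pages that were migrated or swapped out,
 * "failed" holds permanent failures, and pages still sitting on "from"
 * either ran out of target pages or were busy on every pass. A typical
 * caller puts the moved pages back on the LRU right away:
 *
 *	err = migrate_pages(pagelist, &newlist, &moved, &failed);
 *	putback_lru_pages(&moved);
 */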

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct page *newpage, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct buffer_head *bh, *head;
	int rc;

	if (!mapping)
		return -EAGAIN;

	if (!page_has_buffers(page))
		return migrate_page(newpage, page);

	head = page_buffers(page);

	rc = migrate_page_remove_references(newpage, page, 3);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
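
/*
 * Illustrative wiring (sketch only, not part of the original file): a
 * block device or filesystem whose page cache pages carry buffer_heads
 * would use buffer_migrate_page() instead of migrate_page() in its
 * (hypothetical) address_space_operations:
 *
 *	static struct address_space_operations example_blk_aops = {
 *		...
 *		.migratepage	= buffer_migrate_page,
 *	};
 */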

/*
 * Migrate the list 'pagelist' of pages to a certain destination.
 *
 * Specify destination with either non-NULL vma or dest >= 0
 * Returns the number of pages not migrated or an error code
 */
int migrate_pages_to(struct list_head *pagelist,
			struct vm_area_struct *vma, int dest)
{
	LIST_HEAD(newlist);
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	int err = 0;
	unsigned long offset = 0;
	int nr_pages = 0;
	struct page *page;
	struct list_head *p;

redo:
	nr_pages = 0;
	list_for_each(p, pagelist) {
		if (vma) {
			/*
			 * The address passed to alloc_page_vma is used to
			 * generate the proper interleave behavior. We fake
			 * the address here by an increasing offset in order
			 * to get the proper distribution of pages.
			 *
			 * No decision has been made as to which page
			 * a certain old page is moved to so we cannot
			 * specify the correct address.
			 */
			page = alloc_page_vma(GFP_HIGHUSER, vma,
					offset + vma->vm_start);
			offset += PAGE_SIZE;
		}
		else
			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);

		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		list_add_tail(&page->lru, &newlist);
		nr_pages++;
		if (nr_pages > MIGRATE_CHUNK_SIZE)
			break;
	}
	err = migrate_pages(pagelist, &newlist, &moved, &failed);

	putback_lru_pages(&moved);	/* Call release pages instead ?? */

	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
		goto redo;
out:
	/* Return leftover allocated pages */
	while (!list_empty(&newlist)) {
		page = list_entry(newlist.next, struct page, lru);
		list_del(&page->lru);
		__free_page(page);
	}
	list_splice(&failed, pagelist);
	if (err < 0)
		return err;

	/* Calculate number of leftover pages */
	nr_pages = 0;
	list_for_each(p, pagelist)
		nr_pages++;
	return nr_pages;
}