/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2, and is based on swsusp.c.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
struct pbe *pagedir_nosave;
unsigned int nr_copy_pages;
#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;

				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}
struct highmem_page {
	char *data;
	struct page *page;
	struct highmem_page *next;
};

static struct highmem_page *highmem_copy;
static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;

	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page)) {
			printk("highmem reserved page?!\n");
			continue;
		}
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *)get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}
int save_highmem(void)
{
	struct zone *zone;
	int res = 0;

	pr_debug("swsusp: Saving Highmem\n");
	for_each_zone (zone) {
		if (is_highmem(zone))
			res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	return 0;
}
int restore_highmem(void)
{
	printk("swsusp: Restoring Highmem\n");
	while (highmem_copy) {
		struct highmem_page *save = highmem_copy;
		void *kaddr;

		highmem_copy = save->next;
		kaddr = kmap_atomic(save->page, KM_USER0);
		memcpy(kaddr, save->data, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		free_page((long)save->data);
		kfree(save);
	}
	return 0;
}
#endif /* CONFIG_HIGHMEM */
static int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
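
/*
 * Worked example (illustrative; the addresses are made up): __nosave_begin
 * and __nosave_end are linker symbols bracketing the .data.nosave section.
 * If __nosave_begin sits at physical 0x00c00000 and __nosave_end at
 * 0x00c01800, then with 4 KB pages nosave_begin_pfn = 0xc00 and
 * nosave_end_pfn = 0xc02: PAGE_ALIGN() rounds the end up, so a partially
 * used last page is still excluded from the image.
 */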
/**
 * saveable - Determine whether a page should be cloned or not.
 *
 * We save a page if it's Reserved, and not in the range of pages
 * statically defined as 'unsaveable', or if it isn't reserved, and
 * isn't part of a free chunk of pages.
 */

static int saveable(struct zone *zone, unsigned long *zone_pfn)
{
	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
	struct page *page;

	if (!pfn_valid(pfn))
		return 0;

	page = pfn_to_page(pfn);
	BUG_ON(PageReserved(page) && PageNosave(page));
	if (PageNosave(page))
		return 0;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return 0;
	if (PageNosaveFree(page))
		return 0;

	return 1;
}
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			n += saveable(zone, &zone_pfn);
	}
	return n;
}
static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			if (saveable(zone, &zone_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address,
				       (void *)pbe->orig_address, PAGE_SIZE);
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}
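
/*
 * Clarifying note (added; follows from the code above): marking the pagedir
 * pages and the copy pages with PG_nosave_free makes saveable() return 0
 * for them, so the snapshot loop never copies its own bookkeeping pages or
 * its own destination buffers into the image.
 */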
/**
 * free_pagedir - free pages allocated with alloc_pagedir()
 */

void free_pagedir(struct pbe *pblist)
{
	struct pbe *pbe;

	while (pblist) {
		pbe = (pblist + PB_PAGE_SKIP)->next;
		ClearPageNosave(virt_to_page(pblist));
		ClearPageNosaveFree(virt_to_page(pblist));
		free_page((unsigned long)pblist);
		pblist = pbe;
	}
}
/**
 * fill_pb_page - Create a list of PBEs on a given memory page
 */

static inline void fill_pb_page(struct pbe *pbpage)
{
	struct pbe *p;

	p = pbpage;
	pbpage += PB_PAGE_SKIP;
	do
		p->next = p + 1;
	while (++p < pbpage);
}

/**
 * create_pbe_list - Create a list of PBEs on top of a given chain
 * of memory pages allocated with alloc_pagedir()
 */

static inline void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
{
	struct pbe *pbpage, *p;
	unsigned int num = PBES_PER_PAGE;

	for_each_pb_page (pbpage, pblist) {
		if (num >= nr_pages)
			break;

		fill_pb_page(pbpage);
		num += PBES_PER_PAGE;
	}
	if (pbpage) {
		for (num -= PBES_PER_PAGE - 1, p = pbpage; num < nr_pages; p++, num++)
			p->next = p + 1;
		p->next = NULL;
	}
}
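
/*
 * Worked example (illustrative; assumes the 2.6-era struct pbe of three
 * unsigned-long-sized fields and PBES_PER_PAGE = PAGE_SIZE/sizeof(struct pbe)
 * from power.h): on 32-bit x86 with 4 KB pages, sizeof(struct pbe) == 12,
 * so PBES_PER_PAGE == 341 and PB_PAGE_SKIP == 340. fill_pb_page() links
 * pbes 0..339 to their in-page successors; pbe 340 carries no data and is
 * left for create_pbe_list()/alloc_pagedir() to point at the first pbe of
 * the next page in the chain.
 */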
/**
 * On resume it is necessary to trace and eventually free the unsafe
 * pages that have been allocated, because they are needed for I/O
 * (on x86-64 we will likely "eat" these pages once again while
 * creating the temporary page translation tables).
 */

struct eaten_page {
	struct eaten_page *next;
	char padding[PAGE_SIZE - sizeof(void *)];
};

static struct eaten_page *eaten_pages = NULL;
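
/*
 * Note (added): the padding array is sized so that
 * sizeof(struct eaten_page) == PAGE_SIZE, i.e. each list node occupies
 * exactly the eaten page it describes, and tracking costs no extra memory.
 * A compile-time assertion along these lines would document the invariant:
 *
 *	BUILD_BUG_ON(sizeof(struct eaten_page) != PAGE_SIZE);
 */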
void release_eaten_pages(void)
{
	struct eaten_page *p, *q;

	p = eaten_pages;
	while (p) {
		q = p->next;
		/* We don't want swsusp_free() to free this page again */
		ClearPageNosave(virt_to_page(p));
		free_page((unsigned long)p);
		p = q;
	}
	eaten_pages = NULL;
}
/**
 * alloc_image_page - allocate a page for the snapshot image
 * @safe_needed: on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * that had been used before suspend.
 *
 * The unsafe pages are marked with the PG_nosave_free flag.
 *
 * Allocated but unusable (i.e. eaten) memory pages should be marked
 * so that swsusp_free() can release them.
 */

static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	if (safe_needed)
		do {
			res = (void *)get_zeroed_page(gfp_mask);
			if (res && PageNosaveFree(virt_to_page(res))) {
				/* This is for swsusp_free() */
				SetPageNosave(virt_to_page(res));
				((struct eaten_page *)res)->next = eaten_pages;
				eaten_pages = res;
			}
		} while (res && PageNosaveFree(virt_to_page(res)));
	else
		res = (void *)get_zeroed_page(gfp_mask);
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)alloc_image_page(gfp_mask, 1);
}
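
/*
 * Illustrative usage (hypothetical caller, not taken from this file): the
 * arch-specific resume code can draw scratch pages for temporary page
 * tables from here, knowing they cannot collide with image data:
 *
 *	void *pt = (void *)get_safe_page(GFP_ATOMIC);
 *	if (!pt)
 *		return -ENOMEM;
 */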
/**
 * alloc_pagedir - Allocate the page directory.
 *
 * First, determine exactly how many pages we need and
 * allocate them.
 *
 * We arrange the pages in a chain: each page is an array of PBES_PER_PAGE
 * struct pbe elements (pbes) and the last element in the page points
 * to the next page.
 *
 * On each page we set up a list of struct pbe elements.
 */

struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed)
{
	unsigned int num;
	struct pbe *pblist, *pbe;

	if (!nr_pages)
		return NULL;

	pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
	pblist = alloc_image_page(gfp_mask, safe_needed);
	/* FIXME: rewrite this ugly loop */
	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
			pbe = pbe->next, num += PBES_PER_PAGE) {
		pbe += PB_PAGE_SKIP;
		pbe->next = alloc_image_page(gfp_mask, safe_needed);
	}
	if (!pbe) { /* get_zeroed_page() failed */
		free_pagedir(pblist);
		pblist = NULL;
	} else
		create_pbe_list(pblist, nr_pages);
	return pblist;
}
/**
 * swsusp_free - Free pages allocated for suspend.
 *
 * Suspend pages are allocated before the atomic copy, so we need to
 * free them after resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone (zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long)page_address(page));
				}
			}
	}
}
/**
 * enough_free_mem - Make sure we have enough free memory to snapshot.
 *
 * Returns TRUE or FALSE after checking the number of available
 * free pages.
 */

static int enough_free_mem(unsigned int nr_pages)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone (zone)
		if (!is_highmem(zone))
			n += zone->free_pages;
	pr_debug("swsusp: available memory: %u pages\n", n);
	return n > (nr_pages + PAGES_FOR_IO +
		(nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE);
}
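
/*
 * Worked example (illustrative; assumes PAGES_FOR_IO == 1024 and
 * PBES_PER_PAGE == 341 as in the 2.6-era headers): to snapshot
 * nr_pages == 10000, we need strictly more than
 * 10000 + 1024 + (10000 + 340)/341 = 10000 + 1024 + 30 = 11054
 * free lowmem pages: one per copied page, a reserve for I/O, and one
 * pagedir page per 341 PBEs.
 */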
int alloc_data_pages(struct pbe *pblist, gfp_t gfp_mask, int safe_needed)
{
	struct pbe *p;

	for_each_pbe (p, pblist) {
		p->address = (unsigned long)alloc_image_page(gfp_mask, safe_needed);
		if (!p->address)
			return -ENOMEM;
	}
	return 0;
}
static struct pbe *swsusp_alloc(unsigned int nr_pages)
{
	struct pbe *pblist;

	if (!(pblist = alloc_pagedir(nr_pages, GFP_ATOMIC | __GFP_COLD, 0))) {
		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
		return NULL;
	}

	if (alloc_data_pages(pblist, GFP_ATOMIC | __GFP_COLD, 0)) {
		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
		swsusp_free();
		return NULL;
	}

	return pblist;
}
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages;

	pr_debug("swsusp: critical section:\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages);

	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
		 nr_pages,
		 (nr_pages + PBES_PER_PAGE - 1) / PBES_PER_PAGE,
		 PAGES_FOR_IO, nr_free_pages());

	if (!enough_free_mem(nr_pages)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	pagedir_nosave = swsusp_alloc(nr_pages);
	if (!pagedir_nosave)
		return -ENOMEM;

	/* During the allocation of the suspend pagedir, new cold pages may
	 * appear. Kill them.
	 */
	drain_local_pages();
	copy_data_pages(pagedir_nosave);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specifically means we must
	 * _not_ touch swap space! Except we must write out our image of
	 * course.
	 */

	nr_copy_pages = nr_pages;

	printk("swsusp: critical section: done (%u pages copied)\n", nr_pages);
	return 0;
}