/*
 * linux/kernel/power/swsusp.c
 *
 * This file provides code to write suspend image to swap and read it back.
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@suse.cz>
 *
 * This file is released under the GPLv2.
 *
 * I'd like to thank the following people for their work:
 *
 * Pavel Machek <pavel@ucw.cz>:
 * Modifications, defectiveness pointing, being with me at the very beginning,
 * suspend to swap space, stop all tasks. Port to 2.4.18-ac and 2.5.17.
 *
 * Steve Doddi <dirk@loth.demon.co.uk>:
 * Support the possibility of hardware state restoring.
 *
 * Raph <grey.havens@earthling.net>:
 * Support for preserving states of network devices and virtual console
 * (including X and svgatextmode)
 *
 * Kurt Garloff <garloff@suse.de>:
 * Straightened the critical function in order to prevent compilers from
 * playing tricks with local variables.
 *
 * Andreas Mohr <a.mohr@mailto.de>
 *
 * Alex Badea <vampire@go.ro>:
 * Fixed runaway init
 *
 * Rafael J. Wysocki <rjw@sisk.pl>
 * Reworked the freeing of memory and the handling of swap
 *
 * More state savers are welcome. Especially for the scsi layer...
 *
 * For TODOs, FIXMEs also look in Documentation/power/swsusp.txt
 */

#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/swap.h>
#include <linux/pm.h>
#include <linux/swapops.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/rbtree.h>

#include "power.h"

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size = 500 * 1024 * 1024;
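
/*
 * Example (illustrative, not part of the original file): userspace can lower
 * the preferred image size before suspending, e.g.
 *
 *        echo 104857600 > /sys/power/image_size
 *
 * asks swsusp to aim for an image of at most 100 MB (104857600 bytes).
 */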

int in_suspend __nosavedata = 0;

#ifdef CONFIG_HIGHMEM
unsigned int count_highmem_pages(void);
int restore_highmem(void);
#else
static inline int restore_highmem(void) { return 0; }
static inline unsigned int count_highmem_pages(void) { return 0; }
#endif

/**
 * The following functions are used for tracing the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
        struct rb_node node;
        unsigned long start;
        unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
        struct rb_node **new = &(swsusp_extents.rb_node);
        struct rb_node *parent = NULL;
        struct swsusp_extent *ext;

        /* Figure out where to put the new node */
        while (*new) {
                ext = container_of(*new, struct swsusp_extent, node);
                parent = *new;
                if (swap_offset < ext->start) {
                        /* Try to merge */
                        if (swap_offset == ext->start - 1) {
                                ext->start--;
                                return 0;
                        }
                        new = &((*new)->rb_left);
                } else if (swap_offset > ext->end) {
                        /* Try to merge */
                        if (swap_offset == ext->end + 1) {
                                ext->end++;
                                return 0;
                        }
                        new = &((*new)->rb_right);
                } else {
                        /* It already is in the tree */
                        return -EINVAL;
                }
        }
        /* Add the new node and rebalance the tree. */
        ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
        if (!ext)
                return -ENOMEM;
        ext->start = swap_offset;
        ext->end = swap_offset;
        rb_link_node(&ext->node, parent, new);
        rb_insert_color(&ext->node, &swsusp_extents);
        return 0;
}
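
/*
 * Illustrative behaviour of swsusp_extents_insert() (example, not part of the
 * original file): inserting offsets 10, 11 and 12 in that order leaves a
 * single extent [10..12], because an offset adjacent to an existing extent
 * only extends that extent; inserting 11 again returns -EINVAL because the
 * offset is already covered.
 */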

/**
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
        unsigned long offset;

        offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
                        swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
        return 0;
}

/**
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
        struct rb_node *node;

        while ((node = swsusp_extents.rb_node)) {
                struct swsusp_extent *ext;
                unsigned long offset;

                ext = container_of(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
                        swap_free(swp_entry(swap, offset));

                kfree(ext);
        }
}
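
/*
 * Typical use of the two helpers above (a sketch, not part of the original
 * file; the real callers live in kernel/power/swap.c): the image writer asks
 * for one block per page to be written and, if the swap device runs out of
 * space, releases everything allocated so far:
 *
 *        sector_t offset = alloc_swapdev_block(swap);
 *        if (!offset) {
 *                free_all_swap_pages(swap);
 *                return -ENOSPC;
 *        }
 */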

int swsusp_swap_in_use(void)
{
        return (swsusp_extents.rb_node != NULL);
}

/**
 * swsusp_show_speed - print the time elapsed between two events represented by
 * @start and @stop
 *
 * @nr_pages - number of pages processed between @start and @stop
 * @msg - introductory message to print
 */

void swsusp_show_speed(struct timeval *start, struct timeval *stop,
                        unsigned nr_pages, char *msg)
{
        s64 elapsed_centisecs64;
        int centisecs;
        int k;
        int kps;

        elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
        do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
        centisecs = elapsed_centisecs64;
        if (centisecs == 0)
                centisecs = 1;  /* avoid div-by-zero */
        k = nr_pages * (PAGE_SIZE / 1024);
        kps = (k * 100) / centisecs;
        printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
                        centisecs / 100, centisecs % 100,
                        kps / 1000, (kps % 1000) / 10);
}
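
/*
 * Worked example for swsusp_show_speed() (illustrative, not part of the
 * original file): 25000 pages of 4 KB processed in 2.50 s give
 * k = 25000 * 4 = 100000 kbytes and kps = 100000 * 100 / 250 = 40000, so the
 * message printed is "... 100000 kbytes in 2.50 seconds (40.00 MB/s)".
 */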

/**
 * swsusp_shrink_memory - Try to free as much memory as needed
 *
 * ... but do not OOM-kill anyone
 *
 * Notice: all userland should be stopped before it is called, or
 * livelock is possible.
 */

#define SHRINK_BITE	10000
static inline unsigned long __shrink_memory(long tmp)
{
        if (tmp > SHRINK_BITE)
                tmp = SHRINK_BITE;
        return shrink_all_memory(tmp);
}

int swsusp_shrink_memory(void)
{
        long tmp;
        struct zone *zone;
        unsigned long pages = 0;
        unsigned int i = 0;
        char *p = "-\\|/";
        struct timeval start, stop;

        printk("Shrinking memory...  ");
        do_gettimeofday(&start);
        do {
                long size, highmem_size;

                highmem_size = count_highmem_pages();
                size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
                tmp = size;
                size += highmem_size;
                for_each_zone (zone)
                        if (populated_zone(zone)) {
                                tmp += snapshot_additional_pages(zone);
                                if (is_highmem(zone)) {
                                        highmem_size -=
                                        zone_page_state(zone, NR_FREE_PAGES);
                                } else {
                                        tmp -= zone_page_state(zone, NR_FREE_PAGES);
                                        tmp += zone->lowmem_reserve[ZONE_NORMAL];
                                }
                        }

                if (highmem_size < 0)
                        highmem_size = 0;

                tmp += highmem_size;
                if (tmp > 0) {
                        tmp = __shrink_memory(tmp);
                        if (!tmp)
                                return -ENOMEM;
                        pages += tmp;
                } else if (size > image_size / PAGE_SIZE) {
                        tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
                        pages += tmp;
                }
                printk("\b%c", p[i++%4]);
        } while (tmp > 0);
        do_gettimeofday(&stop);
        printk("\bdone (%lu pages freed)\n", pages);
        swsusp_show_speed(&start, &stop, pages, "Freed");

        return 0;
}
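
/*
 * Worked example for the image_size test above (illustrative, not part of the
 * original file): with the default image_size of 500 * 1024 * 1024 bytes and
 * a 4 KB PAGE_SIZE, image_size / PAGE_SIZE is 128000, so the "else if" branch
 * keeps shrinking memory only while the estimated image exceeds 128000 pages.
 */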

int swsusp_suspend(void)
{
        int error;

        if ((error = arch_prepare_suspend()))
                return error;

        local_irq_disable();
        /* At this point, device_suspend() has been called, but *not*
         * device_power_down(). We *must* device_power_down() now.
         * Otherwise, drivers for some devices (e.g. interrupt controllers)
         * become desynchronized with the actual state of the hardware
         * at resume time, and evil weirdness ensues.
         */
        if ((error = device_power_down(PMSG_FREEZE))) {
                printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
                goto Enable_irqs;
        }

        save_processor_state();
        if ((error = swsusp_arch_suspend()))
                printk(KERN_ERR "Error %d suspending\n", error);
        /* Restore control flow magically appears here */
        restore_processor_state();
        /* NOTE: device_power_up() is just a resume() for devices
         * that suspended with irqs off ... no overall powerup.
         */
        device_power_up();
 Enable_irqs:
        local_irq_enable();
        return error;
}

int swsusp_resume(void)
{
        int error;

        local_irq_disable();
        /* NOTE: device_power_down() is just a suspend() with irqs off;
         * it has no special "power things down" semantics
         */
        if (device_power_down(PMSG_PRETHAW))
                printk(KERN_ERR "Some devices failed to power down, very bad\n");
        /* We'll ignore saved state, but this gets preempt count (etc) right */
        save_processor_state();
        error = restore_highmem();
        if (!error) {
                error = swsusp_arch_resume();
                /* The code below is only ever reached in case of a failure.
                 * Otherwise execution continues at the place where
                 * swsusp_arch_suspend() was called.
                 */
                BUG_ON(!error);
                /* This call to restore_highmem() undoes the previous one */
                restore_highmem();
        }
        /* The only reason why swsusp_arch_resume() can fail is memory being
         * very tight, so we have to free it as soon as we can to avoid
         * subsequent failures.
         */
        swsusp_free();
        restore_processor_state();
        touch_softlockup_watchdog();
        device_power_up();
        local_irq_enable();
        return error;
}