linux-2.6 / mm / page_io.c
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <asm/pgtable.h>

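/*
 * Allocate and initialise a single-page bio for swap I/O.  @index is the
 * raw swp_entry_t value taken from page_private(): the swap type selects
 * the swap area, and the swap offset is mapped to the backing device
 * sector that holds this page's swap slot.
 */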
static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
                                struct page *page, bio_end_io_t end_io)
{
        struct bio *bio;

        bio = bio_alloc(gfp_flags, 1);
        if (bio) {
                struct swap_info_struct *sis;
                swp_entry_t entry = { .val = index, };

                sis = get_swap_info_struct(swp_type(entry));
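                /* Convert the swap slot's page offset into a 512-byte sector number. */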
                bio->bi_sector = map_swap_page(sis, swp_offset(entry)) *
                                        (PAGE_SIZE >> 9);
                bio->bi_bdev = sis->bdev;
                bio->bi_io_vec[0].bv_page = page;
                bio->bi_io_vec[0].bv_len = PAGE_SIZE;
                bio->bi_io_vec[0].bv_offset = 0;
                bio->bi_vcnt = 1;
                bio->bi_idx = 0;
                bio->bi_size = PAGE_SIZE;
                bio->bi_end_io = end_io;
        }
        return bio;
}

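/*
 * Completion handler for swap write-out.  On failure the page is re-dirtied
 * so its contents are not lost; in all cases writeback on the page is ended
 * and the bio reference is dropped.
 */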
static void end_swap_bio_write(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (!uptodate) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
                 * Re-dirty the page in order to avoid it being reclaimed.
                 * Also print a dire warning that things will go BAD (tm)
                 * very quickly.
                 *
                 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
                 */
                set_page_dirty(page);
                printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
                                (unsigned long long)bio->bi_sector);
                ClearPageReclaim(page);
        }
        end_page_writeback(page);
        bio_put(bio);
}

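/*
 * Completion handler for swap-in reads.  Marks the page up to date on
 * success (or PageError on failure), then unlocks the page and drops the
 * bio reference.
 */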
void end_swap_bio_read(struct bio *bio, int err)
{
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct page *page = bio->bi_io_vec[0].bv_page;

        if (!uptodate) {
                SetPageError(page);
                ClearPageUptodate(page);
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
                                (unsigned long long)bio->bi_sector);
        } else {
                SetPageUptodate(page);
        }
        unlock_page(page);
        bio_put(bio);
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
        struct bio *bio;
        int ret = 0, rw = WRITE;

        if (remove_exclusive_swap_page(page)) {
                unlock_page(page);
                goto out;
        }
        bio = get_swap_bio(GFP_NOIO, page_private(page), page,
                                end_swap_bio_write);
        if (bio == NULL) {
                set_page_dirty(page);
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
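        /* WB_SYNC_ALL writeback waits on the write, so issue the bio as a sync request. */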
        if (wbc->sync_mode == WB_SYNC_ALL)
                rw |= (1 << BIO_RW_SYNC);
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);
        submit_bio(rw, bio);
out:
        return ret;
}

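/*
 * Start an asynchronous read of @page from its swap slot.  The page must be
 * locked and not yet up to date; end_swap_bio_read() marks it up to date and
 * unlocks it when the read completes.
 */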
int swap_readpage(struct file *file, struct page *page)
{
        struct bio *bio;
        int ret = 0;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageUptodate(page));
        bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
                                end_swap_bio_read);
        if (bio == NULL) {
                unlock_page(page);
                ret = -ENOMEM;
                goto out;
        }
        count_vm_event(PSWPIN);
        submit_bio(READ, bio);
out:
        return ret;
}