/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/version.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
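
/*
 * One struct compressed_bio tracks everything needed to finish IO on a
 * compressed extent.  Every bio submitted for the extent points back at
 * it through bi_private, and pending_bios counts the bios still in
 * flight so the last one to complete can do the cleanup.
 */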
struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;
};
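
/*
 * Allocate a bio aimed at first_byte on bdev.  When we are called from
 * memory reclaim (PF_MEMALLOC), keep halving the vector count so a
 * smaller bio can still be allocated instead of failing outright.
 */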
static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	struct bio *bio;
	int nr_vecs;

	nr_vecs = bio_get_nr_vecs(bdev);
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_byte >> 9;
	}
	return bio;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio, int err)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (err)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	ret = btrfs_zlib_decompress_biovec(cb->compressed_pages,
					cb->start,
					cb->orig_bio->bi_io_vec,
					cb->orig_bio->bi_vcnt,
					cb->compressed_len);
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		page_cache_release(page);
	}

	/* do io completion on the original bio */
	if (cb->errors)
		bio_io_error(cb->orig_bio);
	else
		bio_endio(cb->orig_bio, 0);

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline int end_compressed_writeback(struct inode *inode, u64 start,
					     unsigned long ram_size)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			end_page_writeback(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
	return 0;
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
204 struct extent_io_tree *tree;
205 struct compressed_bio *cb = bio->bi_private;
213 /* if there are more bios still pending for this compressed
216 if (!atomic_dec_and_test(&cb->pending_bios))
219 /* ok, we're the last bio for this extent, step one is to
220 * call back into the FS and do all the end_io operations
223 tree = &BTRFS_I(inode)->io_tree;
224 cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
225 tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
227 cb->start + cb->len - 1,
229 cb->compressed_pages[0]->mapping = NULL;
231 end_compressed_writeback(inode, cb->start, cb->len);
232 /* note, our inode could be gone now */
235 * release the compressed pages, these came from alloc_page and
236 * are not attached to the inode at all
239 for (index = 0; index < cb->nr_pages; index++) {
240 page = cb->compressed_pages[index];
241 page->mapping = NULL;
242 page_cache_release(page);
245 /* finally free the cb struct */
246 kfree(cb->compressed_pages);

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int page_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;

	WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1));
	cb = kmalloc(sizeof(*cb), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
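
	/*
	 * checksum the uncompressed file bytes up front; the end_io hooks
	 * expect the sums to be in place by the time the writes complete
	 */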
	ret = btrfs_csum_file_bytes(root, inode, start, len);
	BUG_ON(ret);

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (page_index = 0; page_index < cb->nr_pages; page_index++) {
		page = compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_CACHE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
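		/*
		 * if the bio is full or the merge hook refuses this page,
		 * submit what we have and start a fresh bio at the
		 * current disk byte
		 */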
		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
			BUG_ON(ret);

			ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
			BUG_ON(ret);

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
		}
		if (bytes_left < PAGE_CACHE_SIZE) {
			printk(KERN_WARNING "bytes left %lu compress len %lu nr %lu\n",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_CACHE_SIZE;
		first_byte += PAGE_CACHE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
	BUG_ON(ret);

	bio_put(bio);
	return 0;
}
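
/*
 * readahead helper: add pages from the inode's address space to the
 * original read bio so that everything covered by this compressed
 * extent can be decompressed in a single pass, instead of re-reading
 * and re-inflating the extent once per page.
 */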
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long page_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_CACHE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
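
	/*
	 * walk forward from the page after the last one already in the
	 * bio, stopping at EOF or the end of the compressed extent
	 */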
	while (last_offset < compressed_end) {
		page_index = last_offset >> PAGE_CACHE_SHIFT;

		if (page_index > end_index)
			break;

		/* the result is only used as a hint, so an rcu-protected
		 * lookup is enough here
		 */
		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_index);
		rcu_read_unlock();
		if (page) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		/* mask out __GFP_FS so readahead allocations can't recurse
		 * back into the filesystem
		 */
		page = alloc_page(mapping_gfp_mask(mapping) & ~__GFP_FS);
		if (!page)
			break;

		page->index = page_index;
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (add_to_page_cache(page, mapping,
				      page->index, GFP_NOFS)) {
			page_cache_release(page);
			goto next;
		}

		/* open coding of lru_cache_add, also not exported */
		page_cache_get(page);
		if (!pagevec_add(&pvec, page))
			__pagevec_lru_add(&pvec);

		end = last_offset + PAGE_CACHE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end, GFP_NOFS);
		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_CACHE_SIZE);
		spin_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
		free_extent_map(em);
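
		/*
		 * if this page spans EOF, zero the part past isize so
		 * stale data beyond the end of file is never exposed
		 */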
		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_CACHE_SIZE - zero_offset;
				userpage = kmap_atomic(page, KM_USER0);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage, KM_USER0);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_CACHE_SIZE, 0);

		if (ret == PAGE_CACHE_SIZE) {
			nr_pages++;
			page_cache_release(page);
		} else {
			unlock_extent(tree, last_offset, end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			break;
		}
next:
		last_offset += PAGE_CACHE_SIZE;
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add(&pvec);
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long page_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_sector << 9;
	struct extent_map *em;
	int ret;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_CACHE_SIZE);
	spin_unlock(&em_tree->lock);

	cb = kmalloc(sizeof(*cb), GFP_NOFS);
	if (!cb) {
		free_extent_map(em);
		return -ENOMEM;
	}
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;

	cb->start = em->orig_start;
	compressed_len = em->block_len;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->orig_bio = bio;

	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
				 PAGE_CACHE_SIZE;
	cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
				       GFP_NOFS);
	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (page_index = 0; page_index < nr_pages; page_index++) {
		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
	}
	cb->nr_pages = nr_pages;
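
	/*
	 * pull in readahead pages that fall inside this extent's file
	 * range; they ride along in orig_bio and are filled in when
	 * the extent is decompressed
	 */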
	add_ra_bio_pages(inode, em->start + em->len, cb);
	free_extent_map(em);

	if (!btrfs_test_opt(root, NODATASUM) &&
	    !btrfs_test_flag(inode, NODATASUM)) {
		btrfs_lookup_bio_sums(root, inode, cb->orig_bio);
	}

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);
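
	/* build and submit bios for the freshly allocated compressed pages */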
	for (page_index = 0; page_index < nr_pages; page_index++) {
		page = cb->compressed_pages[page_index];
		page->mapping = inode->i_mapping;
		if (comp_bio->bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_CACHE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
		    PAGE_CACHE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
			BUG_ON(ret);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			ret = btrfs_map_bio(root, READ, comp_bio, 0, 0);
			BUG_ON(ret);

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0);
		}
		cur_disk_byte += PAGE_CACHE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
	BUG_ON(ret);

	ret = btrfs_map_bio(root, READ, comp_bio, 0, 0);
	BUG_ON(ret);

	bio_put(comp_bio);
	return 0;
}