/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>

#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "ref-cache.h"
#include "compression.h"
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};
static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;
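
/*
 * map the S_IFMT bits of an inode mode to the BTRFS_FT_* file type
 * values stored in directory items
 */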
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
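
/*
 * set up the ACLs and security xattrs for a newly created inode,
 * inheriting defaults from the parent directory
 */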
static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
{
	int err;

	err = btrfs_init_acl(inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(inode, dir);
	return err;
}
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;
	int use_compress = 0;

	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (use_compress) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage, KM_USER0);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr, KM_USER0);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  BTRFS_COMPRESS_ZLIB);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);
	return 0;
fail:
	btrfs_free_path(path);
	return err;
}
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, root, inode, start,
				 aligned_end, start, &hint_byte);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
	return 0;
}
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};
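
/*
 * queue a compressed extent (or an uncompressed fallback range) on the
 * async_cow list so the ordered work queue can submit it later
 */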
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;
	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
	    btrfs_test_opt(root, COMPRESS)) {
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
						&total_in,
						&total_compressed,
						max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	/* let's try to make an inline extent */
	if (ret || total_in < (actual_end - start)) {
		/* we didn't compress the entire range, try
		 * to make an uncompressed inline extent.
		 */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
	} else {
		/* try making a compressed inline extent */
		ret = cow_file_range_inline(trans, root, inode, start, end,
					    total_compressed, pages);
	}
	btrfs_end_transaction(trans, root);
	if (ret == 0) {
		/*
		 * inline extent creation worked, we don't need
		 * to create any more async work items.  Unlock
		 * and free up our temp pages.
		 */
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     start, end, NULL, 1, 0,
					     0, 1, 1, 1);
		goto free_pages_out;
	}
	if (will_compress) {
		/*
		 * we aren't doing an inline extent, round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			disk_num_bytes = total_compressed;
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		btrfs_set_flag(inode, NOCOMPRESS);
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret);

		if (start + num_bytes < end && start + num_bytes < actual_end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
cleanup_and_bail_uncompressed:
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
		*num_added += 1;
	}

	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);

	return 0;
}
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret;

	if (list_empty(&async_cow->extents))
		return 0;

	trans = btrfs_join_transaction(root, 1);

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			cow_file_range(inode, async_cow->locked_page,
				       async_extent->start,
				       async_extent->start +
				       async_extent->ram_size - 1,
				       &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent(inode, async_extent->start,
					       ins.objectid,
					       async_extent->ram_size,
					       ins.offset,
					       BTRFS_ORDERED_COMPRESSED);
		BUG_ON(ret);

		btrfs_end_transaction(trans, root);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     NULL, 1, 1, 0, 1, 1, 0);

		ret = btrfs_submit_compressed_write(inode,
						    async_extent->start,
						    async_extent->ram_size,
						    ins.objectid,
						    ins.offset,
						    async_extent->pages,
						    async_extent->nr_pages);
		BUG_ON(ret);

		trans = btrfs_join_transaction(root, 1);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	btrfs_end_transaction(trans, root);
	return 0;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	actual_end = min_t(u64, isize, end + 1);

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;

	if (start == 0) {
		/* let's try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start, end, NULL, 1, 1,
						     1, 1, 1, 1);
			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		em->start = start;
		em->orig_start = em->start;

		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 */
		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, unlock, 1,
					     1, 0, 0, 0);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}
/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}
/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}
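
/*
 * split the delalloc range into chunks (512k when compressing) and
 * queue each one for async processing; throttles once too many async
 * delalloc pages are in flight
 */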
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (btrfs_test_flag(inode, NOCOMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		      atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
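
/*
 * look for checksum items covering the given byte range; returns
 * non-zero if any are found.  Used to decide whether a nocow write
 * is safe.
 */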
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}
/*
 * called for the nocow writeback path.  This checks for snapshots or
 * COW copies of the extents that exist in the file, and COWs the file
 * as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int type;
	int nocow;
	int check_prev = 1;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
						  disk_bytenr))
				goto out_check;
			disk_bytenr += btrfs_file_extent_offset(leaf, fi);
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					     found_key.offset - 1, page_started,
					     nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				spin_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				spin_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					cur_offset, cur_offset + num_bytes - 1,
					locked_page, 1, 1, 1, 0, 0, 0);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	ret = btrfs_end_transaction(trans, root);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}
/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_test_flag(inode, NODATACOW))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (btrfs_test_flag(inode, PREALLOC))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else if (!btrfs_test_opt(root, COMPRESS))
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 1);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
			      unsigned long old, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		btrfs_delalloc_reserve_space(root, inode, end - start + 1);
		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
				unsigned long old, unsigned long bits)
{
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		spin_lock(&root->fs_info->delalloc_lock);
		if (end - start + 1 > root->fs_info->delalloc_bytes) {
			printk(KERN_INFO "btrfs warning: delalloc account "
			       "%llu %llu\n",
			       (unsigned long long)(end - start + 1),
			       (unsigned long long)
			       root->fs_info->delalloc_bytes);
			btrfs_delalloc_free_space(root, inode, (u64)-1);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			btrfs_delalloc_free_space(root, inode,
						  end - start + 1);
			root->fs_info->delalloc_bytes -= end - start + 1;
			BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}
/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret);
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = btrfs_test_flag(inode, NODATASUM);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	if (!(rw & (1 << BIO_RW))) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio, NULL);
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}
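
/*
 * mark a range in the inode's io_tree as delalloc so writeback will
 * allocate space for it later
 */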
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};
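
/*
 * worker for the writepage fixup: wait out any ordered extent covering
 * the page and then mark the range delalloc so writeback does the
 * right thing with it
 */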
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			   EXTENT_ORDERED, 0)) {
		goto out;
	}

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);
out:
	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			     EXTENT_ORDERED, 0);
	if (ret)
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}
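
/*
 * insert the file extent item for a given reserved extent and record
 * the allocation in the extent tree
 */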
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	path->leave_spinning = 1;
	ret = btrfs_drop_extents(trans, root, inode, file_pos,
				 file_pos + num_bytes, file_pos, &hint);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_unlock_up_safe(path, 1);
	btrfs_set_lock_blocking(leaf);

	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);
	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
					  root->root_key.objectid,
					  trans->transid, inode->i_ino, &ins);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}
/*
 * helper function for btrfs_finish_ordered_io, this
 * just reads in some of the csum leaves to prime them into ram
 * before we start the transaction.  It limits the amount of btree
 * reads required while inside the transaction.
 */
static noinline void reada_csum(struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_ordered_sum *sum;
	u64 bytenr;

	sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
			 list);
	bytenr = sum->sums[0].bytenr;

	/*
	 * we don't care about the results, the point of this search is
	 * just to get the btree leaves into ram
	 */
	btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
}
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	int compressed = 0;
	int ret;

	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
	if (!ret)
		return 0;

	/*
	 * before we join the transaction, try to do some of our IO.
	 * This will limit the amount of IO that we have to do with
	 * the transaction running.  We're unlikely to need to do any
	 * IO if the file extents are new, the disk_i_size checks
	 * covers the most common case.
	 */
	if (start < BTRFS_I(inode)->disk_i_size) {
		path = btrfs_alloc_path();
		if (path) {
			ret = btrfs_lookup_file_extent(NULL, root, path,
						       inode->i_ino,
						       start, 0);
			ordered_extent = btrfs_lookup_ordered_extent(inode,
								     start);
			if (!list_empty(&ordered_extent->list)) {
				btrfs_release_path(root, path);
				reada_csum(root, path, ordered_extent);
			}
			btrfs_free_path(path);
		}
	}

	trans = btrfs_join_transaction(root, 1);

	if (!ordered_extent)
		ordered_extent = btrfs_lookup_ordered_extent(inode, start);
	BUG_ON(!ordered_extent);
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
		goto nocow;

	lock_extent(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
		    GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compressed = 1;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compressed);
		ret = btrfs_mark_extent_written(trans, root, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compressed, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		BUG_ON(ret);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
		      ordered_extent->file_offset + ordered_extent->len - 1,
		      GFP_NOFS);
nocow:
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	mutex_lock(&BTRFS_I(inode)->extent_mutex);
	btrfs_ordered_update_i_size(inode, ordered_extent);
	btrfs_update_inode(trans, root, inode);
	btrfs_remove_ordered_extent(inode, ordered_extent);
	mutex_unlock(&BTRFS_I(inode)->extent_mutex);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	btrfs_end_transaction(trans, root);
	return 0;
}
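
/*
 * extent_io.c writepage end hook: a data write completed, so finish
 * the ordered extent covering this range
 */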
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				       struct extent_state *state, int uptodate)
{
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};
static int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		spin_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				  (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & (1 << BIO_RW))
		rw = WRITE;
	else
		rw = READ;

	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						     failrec->last_mirror,
						     failrec->bio_flags);
	return 0;
}
/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}
/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}
	if (btrfs_test_flag(inode, NODATASUM))
		goto good;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	kaddr = kmap_atomic(page, KM_USER0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_USER0);
good:
	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
	       "private %llu\n", page->mapping->host->i_ino,
	       (unsigned long long)start, csum,
	       (unsigned long long)private);
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	if (private == 0)
		return 0;
	return -EIO;
}
/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	/* already on the orphan list, we're good */
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);

	spin_unlock(&root->list_lock);

	/*
	 * insert an orphan item to track this unlinked/truncated file
	 */
	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);

	return ret;
}
/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_del_init(&BTRFS_I(inode)->i_orphan);
	if (!trans) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	spin_unlock(&root->list_lock);

	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);

	return ret;
}
/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this filesystem
 */
void btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	path = btrfs_alloc_path();
	if (!path)
		return;
	path->reada = -1;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			printk(KERN_ERR "Error searching slot for orphan: %d"
			       "\n", ret);
			break;
		}

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		item = btrfs_item_nr(leaf, path->slots[0]);
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(root, path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		inode = btrfs_iget_locked(root->fs_info->sb,
					  found_key.offset, root);
		if (!inode)
			break;

		if (inode->i_state & I_NEW) {
			BTRFS_I(inode)->root = root;

			/* have to set the location manually */
			BTRFS_I(inode)->location.objectid = inode->i_ino;
			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
			BTRFS_I(inode)->location.offset = 0;

			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);
		}

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->list_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->list_lock);

		/*
		 * if this is a bad inode, means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 1);
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}

		/* if we have links, this was a truncate, let's do that */
		if (inode->i_nlink) {
			nr_truncate++;
			btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

	btrfs_free_path(path);
}
/*
 * read an inode from the btree into the in-memory inode
 */
void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	u64 alloc_group_block;
	u32 rdev;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);

	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);

	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
						alloc_group_block, 0);
	btrfs_free_path(path);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
}
/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	path->leave_spinning = 1;
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
2183 * unlink helper that gets used here in inode.c and in the tree logging
2184  * recovery code. It removes a link in a directory with a given name, and
2185 * also drops the back refs in the inode to the directory
2187 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2188 struct btrfs_root *root,
2189 struct inode *dir, struct inode *inode,
2190 const char *name, int name_len)
2192 struct btrfs_path *path;
2194 struct extent_buffer *leaf;
2195 struct btrfs_dir_item *di;
2196 struct btrfs_key key;
2199 path = btrfs_alloc_path();
2205 path->leave_spinning = 1;
2206 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2207 name, name_len, -1);
2216 leaf = path->nodes[0];
2217 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2218 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2221 btrfs_release_path(root, path);
2223 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2225 dir->i_ino, &index);
2227 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2228 "inode %lu parent %lu\n", name_len, name,
2229 inode->i_ino, dir->i_ino);
2233 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2234 index, name, name_len, -1);
2243 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2244 btrfs_release_path(root, path);
2246 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2248 BUG_ON(ret != 0 && ret != -ENOENT);
2250 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2254 btrfs_free_path(path);
2258 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2259 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2260 btrfs_update_inode(trans, root, dir);
2261 btrfs_drop_nlink(inode);
2262 ret = btrfs_update_inode(trans, root, inode);
2263 dir->i_sb->s_dirt = 1;
2268 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2270 struct btrfs_root *root;
2271 struct btrfs_trans_handle *trans;
2272 struct inode *inode = dentry->d_inode;
2274 unsigned long nr = 0;
2276 root = BTRFS_I(dir)->root;
2278 trans = btrfs_start_transaction(root, 1);
2280 btrfs_set_trans_block_group(trans, dir);
2282 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2284 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2285 dentry->d_name.name, dentry->d_name.len);
2287 if (inode->i_nlink == 0)
2288 ret = btrfs_orphan_add(trans, inode);
2290 nr = trans->blocks_used;
2292 btrfs_end_transaction_throttle(trans, root);
2293 btrfs_btree_balance_dirty(root, nr);
2297 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2299 struct inode *inode = dentry->d_inode;
2302 struct btrfs_root *root = BTRFS_I(dir)->root;
2303 struct btrfs_trans_handle *trans;
2304 unsigned long nr = 0;
2307 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2308 * the root of a subvolume or snapshot
2310 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2311 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2315 trans = btrfs_start_transaction(root, 1);
2316 btrfs_set_trans_block_group(trans, dir);
2318 err = btrfs_orphan_add(trans, inode);
2322 /* now the directory is empty */
2323 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2324 dentry->d_name.name, dentry->d_name.len);
2326 btrfs_i_size_write(inode, 0);
2329 nr = trans->blocks_used;
2330 ret = btrfs_end_transaction_throttle(trans, root);
2331 btrfs_btree_balance_dirty(root, nr);
2340 * when truncating bytes in a file, it is possible to avoid reading
2341 * the leaves that contain only checksum items. This can be the
2342 * majority of the IO required to delete a large file, but it must
2343 * be done carefully.
2345 * The keys in the level just above the leaves are checked to make sure
2346 * the lowest key in a given leaf is a csum key, and starts at an offset
2347 * after the new size.
2349 * Then the key for the next leaf is checked to make sure it also has
2350 * a checksum item for the same file. If it does, we know our target leaf
2351  * contains only checksum items, and it can be safely freed without reading them.
2354 * This is just an optimization targeted at large files. It may do
2355 * nothing. It will return 0 unless things went badly.
2357 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2358 struct btrfs_root *root,
2359 struct btrfs_path *path,
2360 struct inode *inode, u64 new_size)
2362 struct btrfs_key key;
2365 struct btrfs_key found_key;
2366 struct btrfs_key other_key;
2367 struct btrfs_leaf_ref *ref;
2371 path->lowest_level = 1;
2372 key.objectid = inode->i_ino;
2373 key.type = BTRFS_CSUM_ITEM_KEY;
2374 key.offset = new_size;
2376 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2380 if (path->nodes[1] == NULL) {
2385 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2386 nritems = btrfs_header_nritems(path->nodes[1]);
2391 if (path->slots[1] >= nritems)
2394 /* did we find a key greater than anything we want to delete? */
2395 if (found_key.objectid > inode->i_ino ||
2396 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2399 /* we check the next key in the node to make sure the leaf contains
2400 * only checksum items. This comparison doesn't work if our
2401 * leaf is the last one in the node
2403 if (path->slots[1] + 1 >= nritems) {
2405 /* search forward from the last key in the node, this
2406 * will bring us into the next node in the tree
2408 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2410 /* unlikely, but we inc below, so check to be safe */
2411 if (found_key.offset == (u64)-1)
2414 /* search_forward needs a path with locks held, do the
2415 * search again for the original key. It is possible
2416 * this will race with a balance and return a path that
2417 * we could modify, but this drop is just an optimization
2418 * and is allowed to miss some leaves.
2420 btrfs_release_path(root, path);
2423 /* setup a max key for search_forward */
2424 other_key.offset = (u64)-1;
2425 other_key.type = key.type;
2426 other_key.objectid = key.objectid;
2428 path->keep_locks = 1;
2429 ret = btrfs_search_forward(root, &found_key, &other_key,
2431 path->keep_locks = 0;
2432 if (ret || found_key.objectid != key.objectid ||
2433 found_key.type != key.type) {
2438 key.offset = found_key.offset;
2439 btrfs_release_path(root, path);
2444 /* we know there's one more slot after us in the tree,
2445 * read that key so we can verify it is also a checksum item
2447 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2449 if (found_key.objectid < inode->i_ino)
2452 if (found_key.type != key.type || found_key.offset < new_size)
2456 * if the key for the next leaf isn't a csum key from this objectid,
2457 * we can't be sure there aren't good items inside this leaf.
2460 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2463 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2464 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2466 * it is safe to delete this leaf, it contains only
2467 * csum items from this inode at an offset >= new_size
2469 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2472 if (root->ref_cows && leaf_gen < trans->transid) {
2473 ref = btrfs_alloc_leaf_ref(root, 0);
2475 ref->root_gen = root->root_key.offset;
2476 ref->bytenr = leaf_start;
2478 ref->generation = leaf_gen;
2481 btrfs_sort_leaf_ref(ref);
2483 ret = btrfs_add_leaf_ref(root, ref, 0);
2485 btrfs_free_leaf_ref(root, ref);
2491 btrfs_release_path(root, path);
2493 if (other_key.objectid == inode->i_ino &&
2494 other_key.type == key.type && other_key.offset > key.offset) {
2495 key.offset = other_key.offset;
2501 /* fixup any changes we've made to the path */
2502 path->lowest_level = 0;
2503 path->keep_locks = 0;
2504 btrfs_release_path(root, path);
2511 * this can truncate away extent items, csum items and directory items.
2512 * It starts at a high offset and removes keys until it can't find
2513 * any higher than new_size
2515 * csum items that cross the new i_size are truncated to the new size
2518 * min_type is the minimum key type to truncate down to. If set to 0, this
2519 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2521 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2522 struct btrfs_root *root,
2523 struct inode *inode,
2524 u64 new_size, u32 min_type)
2527 struct btrfs_path *path;
2528 struct btrfs_key key;
2529 struct btrfs_key found_key;
2530 u32 found_type = (u8)-1;
2531 struct extent_buffer *leaf;
2532 struct btrfs_file_extent_item *fi;
2533 u64 extent_start = 0;
2534 u64 extent_num_bytes = 0;
2540 int pending_del_nr = 0;
2541 int pending_del_slot = 0;
2542 int extent_type = -1;
2544 u64 mask = root->sectorsize - 1;
2547 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2548 path = btrfs_alloc_path();
2552 /* FIXME, add redo link to tree so we don't leak on crash */
2553 key.objectid = inode->i_ino;
2554 key.offset = (u64)-1;
2558 path->leave_spinning = 1;
2559 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2564 /* there are no items in the tree for us to truncate, we're
2565  * done
2566  */
2567 if (path->slots[0] == 0) {
2576 leaf = path->nodes[0];
2577 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2578 found_type = btrfs_key_type(&found_key);
2581 if (found_key.objectid != inode->i_ino)
2584 if (found_type < min_type)
2587 item_end = found_key.offset;
2588 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2589 fi = btrfs_item_ptr(leaf, path->slots[0],
2590 struct btrfs_file_extent_item);
2591 extent_type = btrfs_file_extent_type(leaf, fi);
2592 encoding = btrfs_file_extent_compression(leaf, fi);
2593 encoding |= btrfs_file_extent_encryption(leaf, fi);
2594 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2596 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2597 item_end +=
2598 btrfs_file_extent_num_bytes(leaf, fi);
2599 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2600 item_end += btrfs_file_extent_inline_len(leaf,
2605 if (item_end < new_size) {
2606 if (found_type == BTRFS_DIR_ITEM_KEY)
2607 found_type = BTRFS_INODE_ITEM_KEY;
2608 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2609 found_type = BTRFS_EXTENT_DATA_KEY;
2610 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2611 found_type = BTRFS_XATTR_ITEM_KEY;
2612 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2613 found_type = BTRFS_INODE_REF_KEY;
2614 else if (found_type)
2615 found_type--;
2616 else
2617 break;
2618 btrfs_set_key_type(&key, found_type);
2621 if (found_key.offset >= new_size)
2627 /* FIXME, shrink the extent if the ref count is only 1 */
2628 if (found_type != BTRFS_EXTENT_DATA_KEY)
2631 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2633 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2634 if (!del_item && !encoding) {
2635 u64 orig_num_bytes =
2636 btrfs_file_extent_num_bytes(leaf, fi);
2637 extent_num_bytes = new_size -
2638 found_key.offset + root->sectorsize - 1;
2639 extent_num_bytes = extent_num_bytes &
2640 ~((u64)root->sectorsize - 1);
2641 btrfs_set_file_extent_num_bytes(leaf, fi,
2643 num_dec = (orig_num_bytes -
2645 if (root->ref_cows && extent_start != 0)
2646 inode_sub_bytes(inode, num_dec);
2647 btrfs_mark_buffer_dirty(leaf);
2650 btrfs_file_extent_disk_num_bytes(leaf,
2652 /* FIXME blocksize != 4096 */
2653 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2654 if (extent_start != 0) {
2657 inode_sub_bytes(inode, num_dec);
2659 root_gen = btrfs_header_generation(leaf);
2660 root_owner = btrfs_header_owner(leaf);
2662 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2663 /*
2664  * we can't truncate inline items that have had
2665  * special encodings
2666  */
2667 if (!del_item &&
2668 btrfs_file_extent_compression(leaf, fi) == 0 &&
2669 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2670 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2671 u32 size = new_size - found_key.offset;
2673 if (root->ref_cows) {
2674 inode_sub_bytes(inode, item_end + 1 -
2678 btrfs_file_extent_calc_inline_size(size);
2679 ret = btrfs_truncate_item(trans, root, path,
2682 } else if (root->ref_cows) {
2683 inode_sub_bytes(inode, item_end + 1 -
2689 if (!pending_del_nr) {
2690 /* no pending yet, add ourselves */
2691 pending_del_slot = path->slots[0];
2693 } else if (pending_del_nr &&
2694 path->slots[0] + 1 == pending_del_slot) {
2695 /* hop on the pending chunk */
2697 pending_del_slot = path->slots[0];
2705 btrfs_set_path_blocking(path);
2706 ret = btrfs_free_extent(trans, root, extent_start,
2708 leaf->start, root_owner,
2709 root_gen, inode->i_ino, 0);
2713 if (path->slots[0] == 0) {
2716 btrfs_release_path(root, path);
2717 if (found_type == BTRFS_INODE_ITEM_KEY)
2723 if (pending_del_nr &&
2724 path->slots[0] + 1 != pending_del_slot) {
2725 struct btrfs_key debug;
2727 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2729 ret = btrfs_del_items(trans, root, path,
2734 btrfs_release_path(root, path);
2735 if (found_type == BTRFS_INODE_ITEM_KEY)
2742 if (pending_del_nr) {
2743 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2746 btrfs_free_path(path);
2747 inode->i_sb->s_dirt = 1;
2752 * taken from block_truncate_page, but does cow as it zeros out
2753 * any bytes left in the last page in the file.
2755 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2757 struct inode *inode = mapping->host;
2758 struct btrfs_root *root = BTRFS_I(inode)->root;
2759 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2760 struct btrfs_ordered_extent *ordered;
2762 u32 blocksize = root->sectorsize;
2763 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2764 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2770 if ((offset & (blocksize - 1)) == 0)
2775 page = grab_cache_page(mapping, index);
2779 page_start = page_offset(page);
2780 page_end = page_start + PAGE_CACHE_SIZE - 1;
2782 if (!PageUptodate(page)) {
2783 ret = btrfs_readpage(NULL, page);
2785 if (page->mapping != mapping) {
2787 page_cache_release(page);
2790 if (!PageUptodate(page)) {
2795 wait_on_page_writeback(page);
2797 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2798 set_page_extent_mapped(page);
2800 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2802 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2804 page_cache_release(page);
2805 btrfs_start_ordered_extent(inode, ordered, 1);
2806 btrfs_put_ordered_extent(ordered);
2810 btrfs_set_extent_delalloc(inode, page_start, page_end);
2812 if (offset != PAGE_CACHE_SIZE) {
2813 kaddr = kmap(page);
2814 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2815 flush_dcache_page(page);
2818 ClearPageChecked(page);
2819 set_page_dirty(page);
2820 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2824 page_cache_release(page);
2829 int btrfs_cont_expand(struct inode *inode, loff_t size)
2831 struct btrfs_trans_handle *trans;
2832 struct btrfs_root *root = BTRFS_I(inode)->root;
2833 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2834 struct extent_map *em;
2835 u64 mask = root->sectorsize - 1;
2836 u64 hole_start = (inode->i_size + mask) & ~mask;
2837 u64 block_end = (size + mask) & ~mask;
2843 if (size <= hole_start)
2846 err = btrfs_check_metadata_free_space(root);
2850 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2853 struct btrfs_ordered_extent *ordered;
2854 btrfs_wait_ordered_range(inode, hole_start,
2855 block_end - hole_start);
2856 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2857 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2860 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2861 btrfs_put_ordered_extent(ordered);
2864 trans = btrfs_start_transaction(root, 1);
2865 btrfs_set_trans_block_group(trans, inode);
2867 cur_offset = hole_start;
2869 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2870 block_end - cur_offset, 0);
2871 BUG_ON(IS_ERR(em) || !em);
2872 last_byte = min(extent_map_end(em), block_end);
2873 last_byte = (last_byte + mask) & ~mask;
2874 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2876 hole_size = last_byte - cur_offset;
2877 err = btrfs_drop_extents(trans, root, inode,
2879 cur_offset + hole_size,
2880 cur_offset, &hint_byte);
2883 err = btrfs_insert_file_extent(trans, root,
2884 inode->i_ino, cur_offset, 0,
2885 0, hole_size, 0, hole_size,
2887 btrfs_drop_extent_cache(inode, hole_start,
2890 free_extent_map(em);
2891 cur_offset = last_byte;
2892 if (err || cur_offset >= block_end)
2896 btrfs_end_transaction(trans, root);
2897 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2901 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2903 struct inode *inode = dentry->d_inode;
2906 err = inode_change_ok(inode, attr);
2910 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
2911 if (attr->ia_size > inode->i_size) {
2912 err = btrfs_cont_expand(inode, attr->ia_size);
2915 } else if (inode->i_size > 0 &&
2916 attr->ia_size == 0) {
2918 /* we're truncating a file that used to have good
2919 * data down to zero. Make sure it gets into
2920 * the ordered flush list so that any new writes
2921 * get down to disk quickly.
2923 BTRFS_I(inode)->ordered_data_close = 1;
2927 err = inode_setattr(inode, attr);
2929 if (!err && ((attr->ia_valid & ATTR_MODE)))
2930 err = btrfs_acl_chmod(inode);
2934 void btrfs_delete_inode(struct inode *inode)
2936 struct btrfs_trans_handle *trans;
2937 struct btrfs_root *root = BTRFS_I(inode)->root;
2941 truncate_inode_pages(&inode->i_data, 0);
2942 if (is_bad_inode(inode)) {
2943 btrfs_orphan_del(NULL, inode);
2944 goto no_delete;
2945 }
2946 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2948 btrfs_i_size_write(inode, 0);
2949 trans = btrfs_join_transaction(root, 1);
2951 btrfs_set_trans_block_group(trans, inode);
2952 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2954 btrfs_orphan_del(NULL, inode);
2955 goto no_delete_lock;
2958 btrfs_orphan_del(trans, inode);
2960 nr = trans->blocks_used;
2963 btrfs_end_transaction(trans, root);
2964 btrfs_btree_balance_dirty(root, nr);
2968 nr = trans->blocks_used;
2969 btrfs_end_transaction(trans, root);
2970 btrfs_btree_balance_dirty(root, nr);
2976 * this returns the key found in the dir entry in the location pointer.
2977 * If no dir entries were found, location->objectid is 0.
2979 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2980 struct btrfs_key *location)
2982 const char *name = dentry->d_name.name;
2983 int namelen = dentry->d_name.len;
2984 struct btrfs_dir_item *di;
2985 struct btrfs_path *path;
2986 struct btrfs_root *root = BTRFS_I(dir)->root;
2989 path = btrfs_alloc_path();
2992 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2997 if (!di || IS_ERR(di))
3000 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3002 btrfs_free_path(path);
3005 location->objectid = 0;
3010 * when we hit a tree root in a directory, the btrfs part of the inode
3011 * needs to be changed to reflect the root directory of the tree root. This
3012 * is kind of like crossing a mount point.
3014 static int fixup_tree_root_location(struct btrfs_root *root,
3015 struct btrfs_key *location,
3016 struct btrfs_root **sub_root,
3017 struct dentry *dentry)
3019 struct btrfs_root_item *ri;
3021 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
3023 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
3026 *sub_root = btrfs_read_fs_root(root->fs_info, location,
3027 dentry->d_name.name,
3028 dentry->d_name.len);
3029 if (IS_ERR(*sub_root))
3030 return PTR_ERR(*sub_root);
3032 ri = &(*sub_root)->root_item;
3033 location->objectid = btrfs_root_dirid(ri);
3034 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3035 location->offset = 0;
3040 static noinline void init_btrfs_i(struct inode *inode)
3042 struct btrfs_inode *bi = BTRFS_I(inode);
3045 bi->i_default_acl = NULL;
3050 bi->logged_trans = 0;
3051 bi->delalloc_bytes = 0;
3052 bi->reserved_bytes = 0;
3053 bi->disk_i_size = 0;
3055 bi->index_cnt = (u64)-1;
3056 bi->last_unlink_trans = 0;
3057 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3058 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3059 inode->i_mapping, GFP_NOFS);
3060 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3061 inode->i_mapping, GFP_NOFS);
3062 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3063 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3064 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3065 mutex_init(&BTRFS_I(inode)->extent_mutex);
3066 mutex_init(&BTRFS_I(inode)->log_mutex);
3069 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3071 struct btrfs_iget_args *args = p;
3072 inode->i_ino = args->ino;
3073 init_btrfs_i(inode);
3074 BTRFS_I(inode)->root = args->root;
3075 btrfs_set_inode_space_info(args->root, inode);
3079 static int btrfs_find_actor(struct inode *inode, void *opaque)
3081 struct btrfs_iget_args *args = opaque;
3082 return args->ino == inode->i_ino &&
3083 args->root == BTRFS_I(inode)->root;
3086 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
3087 struct btrfs_root *root, int wait)
3089 struct inode *inode;
3090 struct btrfs_iget_args args;
3091 args.ino = objectid;
3095 inode = ilookup5(s, objectid, btrfs_find_actor,
3098 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
3104 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
3105 struct btrfs_root *root)
3107 struct inode *inode;
3108 struct btrfs_iget_args args;
3109 args.ino = objectid;
3112 inode = iget5_locked(s, objectid, btrfs_find_actor,
3113 btrfs_init_locked_inode,
3118 /* Get an inode object given its location and corresponding root.
3119 * Returns in *is_new if the inode was read from disk
3121 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3122 struct btrfs_root *root, int *is_new)
3124 struct inode *inode;
3126 inode = btrfs_iget_locked(s, location->objectid, root);
3127 if (!inode)
3128 return ERR_PTR(-EACCES);
3130 if (inode->i_state & I_NEW) {
3131 BTRFS_I(inode)->root = root;
3132 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3133 btrfs_read_locked_inode(inode);
3134 unlock_new_inode(inode);
3145 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3147 struct inode *inode;
3148 struct btrfs_inode *bi = BTRFS_I(dir);
3149 struct btrfs_root *root = bi->root;
3150 struct btrfs_root *sub_root = root;
3151 struct btrfs_key location;
3154 if (dentry->d_name.len > BTRFS_NAME_LEN)
3155 return ERR_PTR(-ENAMETOOLONG);
3157 ret = btrfs_inode_by_name(dir, dentry, &location);
3160 return ERR_PTR(ret);
3163 if (location.objectid) {
3164 ret = fixup_tree_root_location(root, &location, &sub_root,
3167 return ERR_PTR(ret);
3169 return ERR_PTR(-ENOENT);
3170 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
3172 return ERR_CAST(inode);
3177 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3178 struct nameidata *nd)
3180 struct inode *inode;
3182 if (dentry->d_name.len > BTRFS_NAME_LEN)
3183 return ERR_PTR(-ENAMETOOLONG);
3185 inode = btrfs_lookup_dentry(dir, dentry);
3187 return ERR_CAST(inode);
3189 return d_splice_alias(inode, dentry);
3192 static unsigned char btrfs_filetype_table[] = {
3193 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3196 static int btrfs_real_readdir(struct file *filp, void *dirent,
3199 struct inode *inode = filp->f_dentry->d_inode;
3200 struct btrfs_root *root = BTRFS_I(inode)->root;
3201 struct btrfs_item *item;
3202 struct btrfs_dir_item *di;
3203 struct btrfs_key key;
3204 struct btrfs_key found_key;
3205 struct btrfs_path *path;
3208 struct extent_buffer *leaf;
3211 unsigned char d_type;
3216 int key_type = BTRFS_DIR_INDEX_KEY;
3221 /* FIXME, use a real flag for deciding about the key type */
3222 if (root->fs_info->tree_root == root)
3223 key_type = BTRFS_DIR_ITEM_KEY;
3225 /* special case for "." */
3226 if (filp->f_pos == 0) {
3227 over = filldir(dirent, ".", 1,
3234 /* special case for .., just use the back ref */
3235 if (filp->f_pos == 1) {
3236 u64 pino = parent_ino(filp->f_path.dentry);
3237 over = filldir(dirent, "..", 2,
3243 path = btrfs_alloc_path();
3246 btrfs_set_key_type(&key, key_type);
3247 key.offset = filp->f_pos;
3248 key.objectid = inode->i_ino;
3250 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3256 leaf = path->nodes[0];
3257 nritems = btrfs_header_nritems(leaf);
3258 slot = path->slots[0];
3259 if (advance || slot >= nritems) {
3260 if (slot >= nritems - 1) {
3261 ret = btrfs_next_leaf(root, path);
3264 leaf = path->nodes[0];
3265 nritems = btrfs_header_nritems(leaf);
3266 slot = path->slots[0];
3274 item = btrfs_item_nr(leaf, slot);
3275 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3277 if (found_key.objectid != key.objectid)
3279 if (btrfs_key_type(&found_key) != key_type)
3281 if (found_key.offset < filp->f_pos)
3284 filp->f_pos = found_key.offset;
3286 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3288 di_total = btrfs_item_size(leaf, item);
3290 while (di_cur < di_total) {
3291 struct btrfs_key location;
3293 name_len = btrfs_dir_name_len(leaf, di);
3294 if (name_len <= sizeof(tmp_name)) {
3295 name_ptr = tmp_name;
3297 name_ptr = kmalloc(name_len, GFP_NOFS);
3303 read_extent_buffer(leaf, name_ptr,
3304 (unsigned long)(di + 1), name_len);
3306 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3307 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3309 /* is this a reference to our own snapshot? If so
3310  * skip it
3311  */
3312 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3313 location.objectid == root->root_key.objectid) {
3317 over = filldir(dirent, name_ptr, name_len,
3318 found_key.offset, location.objectid,
3322 if (name_ptr != tmp_name)
3327 di_len = btrfs_dir_name_len(leaf, di) +
3328 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3330 di = (struct btrfs_dir_item *)((char *)di + di_len);
3334 /* Reached end of directory/root. Bump pos past the last item. */
3335 if (key_type == BTRFS_DIR_INDEX_KEY)
3336 filp->f_pos = INT_LIMIT(off_t);
3342 btrfs_free_path(path);
3346 int btrfs_write_inode(struct inode *inode, int wait)
3348 struct btrfs_root *root = BTRFS_I(inode)->root;
3349 struct btrfs_trans_handle *trans;
3352 if (root->fs_info->btree_inode == inode)
3356 trans = btrfs_join_transaction(root, 1);
3357 btrfs_set_trans_block_group(trans, inode);
3358 ret = btrfs_commit_transaction(trans, root);
3364 * This is somewhat expensive, updating the tree every time the
3365  * inode changes, but it is most likely to find the inode in cache.
3366  * FIXME: needs more benchmarking; there is no reason other than
3367  * performance to keep or drop this code.
3369 void btrfs_dirty_inode(struct inode *inode)
3371 struct btrfs_root *root = BTRFS_I(inode)->root;
3372 struct btrfs_trans_handle *trans;
3374 trans = btrfs_join_transaction(root, 1);
3375 btrfs_set_trans_block_group(trans, inode);
3376 btrfs_update_inode(trans, root, inode);
3377 btrfs_end_transaction(trans, root);
3381 * find the highest existing sequence number in a directory
3382 * and then set the in-memory index_cnt variable to reflect
3383 * free sequence numbers
3385 static int btrfs_set_inode_index_count(struct inode *inode)
3387 struct btrfs_root *root = BTRFS_I(inode)->root;
3388 struct btrfs_key key, found_key;
3389 struct btrfs_path *path;
3390 struct extent_buffer *leaf;
3393 key.objectid = inode->i_ino;
3394 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3395 key.offset = (u64)-1;
3397 path = btrfs_alloc_path();
3401 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3404 /* FIXME: we should be able to handle this */
3410 * MAGIC NUMBER EXPLANATION:
3411  * since we search a directory based on f_pos, and '.' and '..'
3412  * have f_pos of 0 and 1 respectively, everybody else has to
3413  * start at 2
3415 if (path->slots[0] == 0) {
3416 BTRFS_I(inode)->index_cnt = 2;
3422 leaf = path->nodes[0];
3423 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3425 if (found_key.objectid != inode->i_ino ||
3426 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3427 BTRFS_I(inode)->index_cnt = 2;
3431 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3433 btrfs_free_path(path);
3438 * helper to find a free sequence number in a given directory. This current
3439  * code is very simple; later versions will do smarter things in the btree
3441 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3445 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3446 ret = btrfs_set_inode_index_count(dir);
3451 *index = BTRFS_I(dir)->index_cnt;
3452 BTRFS_I(dir)->index_cnt++;
3457 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3458 struct btrfs_root *root,
3460 const char *name, int name_len,
3461 u64 ref_objectid, u64 objectid,
3462 u64 alloc_hint, int mode, u64 *index)
3464 struct inode *inode;
3465 struct btrfs_inode_item *inode_item;
3466 struct btrfs_key *location;
3467 struct btrfs_path *path;
3468 struct btrfs_inode_ref *ref;
3469 struct btrfs_key key[2];
3475 path = btrfs_alloc_path();
3478 inode = new_inode(root->fs_info->sb);
3480 return ERR_PTR(-ENOMEM);
3483 ret = btrfs_set_inode_index(dir, index);
3485 return ERR_PTR(ret);
3488 * index_cnt is ignored for everything but a dir,
3489  * btrfs_set_inode_index_count has an explanation for the magic number
3492 init_btrfs_i(inode);
3493 BTRFS_I(inode)->index_cnt = 2;
3494 BTRFS_I(inode)->root = root;
3495 BTRFS_I(inode)->generation = trans->transid;
3496 btrfs_set_inode_space_info(root, inode);
3502 BTRFS_I(inode)->block_group =
3503 btrfs_find_block_group(root, 0, alloc_hint, owner);
3504 if ((mode & S_IFREG)) {
3505 if (btrfs_test_opt(root, NODATASUM))
3506 btrfs_set_flag(inode, NODATASUM);
3507 if (btrfs_test_opt(root, NODATACOW))
3508 btrfs_set_flag(inode, NODATACOW);
3511 key[0].objectid = objectid;
3512 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3515 key[1].objectid = objectid;
3516 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3517 key[1].offset = ref_objectid;
3519 sizes[0] = sizeof(struct btrfs_inode_item);
3520 sizes[1] = name_len + sizeof(*ref);
3522 path->leave_spinning = 1;
3523 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3527 if (objectid > root->highest_inode)
3528 root->highest_inode = objectid;
3530 inode->i_uid = current_fsuid();
3532 if (dir && (dir->i_mode & S_ISGID)) {
3533 inode->i_gid = dir->i_gid;
3537 inode->i_gid = current_fsgid();
3539 inode->i_mode = mode;
3540 inode->i_ino = objectid;
3541 inode_set_bytes(inode, 0);
3542 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3543 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3544 struct btrfs_inode_item);
3545 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3547 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3548 struct btrfs_inode_ref);
3549 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3550 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3551 ptr = (unsigned long)(ref + 1);
3552 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3554 btrfs_mark_buffer_dirty(path->nodes[0]);
3555 btrfs_free_path(path);
3557 location = &BTRFS_I(inode)->location;
3558 location->objectid = objectid;
3559 location->offset = 0;
3560 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3562 insert_inode_hash(inode);
3566 BTRFS_I(dir)->index_cnt--;
3567 btrfs_free_path(path);
3568 return ERR_PTR(ret);
3571 static inline u8 btrfs_inode_type(struct inode *inode)
3573 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3577 * utility function to add 'inode' into 'parent_inode' with
3578  * a given name and a given sequence number.
3579 * if 'add_backref' is true, also insert a backref from the
3580 * inode to the parent directory.
3582 int btrfs_add_link(struct btrfs_trans_handle *trans,
3583 struct inode *parent_inode, struct inode *inode,
3584 const char *name, int name_len, int add_backref, u64 index)
3587 struct btrfs_key key;
3588 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3590 key.objectid = inode->i_ino;
3591 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3594 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3595 parent_inode->i_ino,
3596 &key, btrfs_inode_type(inode),
3600 ret = btrfs_insert_inode_ref(trans, root,
3603 parent_inode->i_ino,
3606 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3608 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3609 ret = btrfs_update_inode(trans, root, parent_inode);
3614 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3615 struct dentry *dentry, struct inode *inode,
3616 int backref, u64 index)
3618 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3619 inode, dentry->d_name.name,
3620 dentry->d_name.len, backref, index);
3622 d_instantiate(dentry, inode);
3630 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3631 int mode, dev_t rdev)
3633 struct btrfs_trans_handle *trans;
3634 struct btrfs_root *root = BTRFS_I(dir)->root;
3635 struct inode *inode = NULL;
3639 unsigned long nr = 0;
3642 if (!new_valid_dev(rdev))
3645 err = btrfs_check_metadata_free_space(root);
3649 trans = btrfs_start_transaction(root, 1);
3650 btrfs_set_trans_block_group(trans, dir);
3652 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3658 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3660 dentry->d_parent->d_inode->i_ino, objectid,
3661 BTRFS_I(dir)->block_group, mode, &index);
3662 err = PTR_ERR(inode);
3666 err = btrfs_init_inode_security(inode, dir);
3672 btrfs_set_trans_block_group(trans, inode);
3673 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3677 inode->i_op = &btrfs_special_inode_operations;
3678 init_special_inode(inode, inode->i_mode, rdev);
3679 btrfs_update_inode(trans, root, inode);
3681 dir->i_sb->s_dirt = 1;
3682 btrfs_update_inode_block_group(trans, inode);
3683 btrfs_update_inode_block_group(trans, dir);
3685 nr = trans->blocks_used;
3686 btrfs_end_transaction_throttle(trans, root);
3689 inode_dec_link_count(inode);
3692 btrfs_btree_balance_dirty(root, nr);
3696 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3697 int mode, struct nameidata *nd)
3699 struct btrfs_trans_handle *trans;
3700 struct btrfs_root *root = BTRFS_I(dir)->root;
3701 struct inode *inode = NULL;
3704 unsigned long nr = 0;
3708 err = btrfs_check_metadata_free_space(root);
3711 trans = btrfs_start_transaction(root, 1);
3712 btrfs_set_trans_block_group(trans, dir);
3714 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3720 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3722 dentry->d_parent->d_inode->i_ino,
3723 objectid, BTRFS_I(dir)->block_group, mode,
3725 err = PTR_ERR(inode);
3729 err = btrfs_init_inode_security(inode, dir);
3735 btrfs_set_trans_block_group(trans, inode);
3736 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3740 inode->i_mapping->a_ops = &btrfs_aops;
3741 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3742 inode->i_fop = &btrfs_file_operations;
3743 inode->i_op = &btrfs_file_inode_operations;
3744 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3746 dir->i_sb->s_dirt = 1;
3747 btrfs_update_inode_block_group(trans, inode);
3748 btrfs_update_inode_block_group(trans, dir);
3750 nr = trans->blocks_used;
3751 btrfs_end_transaction_throttle(trans, root);
3754 inode_dec_link_count(inode);
3757 btrfs_btree_balance_dirty(root, nr);
3761 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3762 struct dentry *dentry)
3764 struct btrfs_trans_handle *trans;
3765 struct btrfs_root *root = BTRFS_I(dir)->root;
3766 struct inode *inode = old_dentry->d_inode;
3768 unsigned long nr = 0;
3772 if (inode->i_nlink == 0)
3775 btrfs_inc_nlink(inode);
3776 err = btrfs_check_metadata_free_space(root);
3779 err = btrfs_set_inode_index(dir, &index);
3783 trans = btrfs_start_transaction(root, 1);
3785 btrfs_set_trans_block_group(trans, dir);
3786 atomic_inc(&inode->i_count);
3788 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3793 dir->i_sb->s_dirt = 1;
3794 btrfs_update_inode_block_group(trans, dir);
3795 err = btrfs_update_inode(trans, root, inode);
3800 nr = trans->blocks_used;
3802 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
3803 btrfs_end_transaction_throttle(trans, root);
3806 inode_dec_link_count(inode);
3809 btrfs_btree_balance_dirty(root, nr);
3813 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3815 struct inode *inode = NULL;
3816 struct btrfs_trans_handle *trans;
3817 struct btrfs_root *root = BTRFS_I(dir)->root;
3819 int drop_on_err = 0;
3822 unsigned long nr = 1;
3824 err = btrfs_check_metadata_free_space(root);
3828 trans = btrfs_start_transaction(root, 1);
3829 btrfs_set_trans_block_group(trans, dir);
3831 if (IS_ERR(trans)) {
3832 err = PTR_ERR(trans);
3836 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3842 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3844 dentry->d_parent->d_inode->i_ino, objectid,
3845 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3847 if (IS_ERR(inode)) {
3848 err = PTR_ERR(inode);
3854 err = btrfs_init_inode_security(inode, dir);
3858 inode->i_op = &btrfs_dir_inode_operations;
3859 inode->i_fop = &btrfs_dir_file_operations;
3860 btrfs_set_trans_block_group(trans, inode);
3862 btrfs_i_size_write(inode, 0);
3863 err = btrfs_update_inode(trans, root, inode);
3867 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3868 inode, dentry->d_name.name,
3869 dentry->d_name.len, 0, index);
3873 d_instantiate(dentry, inode);
3875 dir->i_sb->s_dirt = 1;
3876 btrfs_update_inode_block_group(trans, inode);
3877 btrfs_update_inode_block_group(trans, dir);
3880 nr = trans->blocks_used;
3881 btrfs_end_transaction_throttle(trans, root);
3886 btrfs_btree_balance_dirty(root, nr);
3890 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3891 * and an extent that you want to insert, deal with overlap and insert
3892 * the new extent into the tree.
3894 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3895 struct extent_map *existing,
3896 struct extent_map *em,
3897 u64 map_start, u64 map_len)
3901 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3902 start_diff = map_start - em->start;
3903 em->start = map_start;
3905 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3906 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3907 em->block_start += start_diff;
3908 em->block_len -= start_diff;
3910 return add_extent_mapping(em_tree, em);
3913 static noinline int uncompress_inline(struct btrfs_path *path,
3914 struct inode *inode, struct page *page,
3915 size_t pg_offset, u64 extent_offset,
3916 struct btrfs_file_extent_item *item)
3919 struct extent_buffer *leaf = path->nodes[0];
3922 unsigned long inline_size;
3925 WARN_ON(pg_offset != 0);
3926 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3927 inline_size = btrfs_file_extent_inline_item_len(leaf,
3928 btrfs_item_nr(leaf, path->slots[0]));
3929 tmp = kmalloc(inline_size, GFP_NOFS);
3930 ptr = btrfs_file_extent_inline_start(item);
3932 read_extent_buffer(leaf, tmp, ptr, inline_size);
3934 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
3935 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3936 inline_size, max_size);
3938 char *kaddr = kmap_atomic(page, KM_USER0);
3939 unsigned long copy_size = min_t(u64,
3940 PAGE_CACHE_SIZE - pg_offset,
3941 max_size - extent_offset);
3942 memset(kaddr + pg_offset, 0, copy_size);
3943 kunmap_atomic(kaddr, KM_USER0);
3950 * a bit scary, this does extent mapping from logical file offset to the disk.
3951 * the ugly parts come from merging extents from the disk with the in-ram
3952 * representation. This gets more complex because of the data=ordered code,
3953 * where the in-ram extents might be locked pending data=ordered completion.
3955 * This also copies inline extents directly into the page.
3958 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3959 size_t pg_offset, u64 start, u64 len,
3965 u64 extent_start = 0;
3967 u64 objectid = inode->i_ino;
3969 struct btrfs_path *path = NULL;
3970 struct btrfs_root *root = BTRFS_I(inode)->root;
3971 struct btrfs_file_extent_item *item;
3972 struct extent_buffer *leaf;
3973 struct btrfs_key found_key;
3974 struct extent_map *em = NULL;
3975 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3976 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3977 struct btrfs_trans_handle *trans = NULL;
3981 spin_lock(&em_tree->lock);
3982 em = lookup_extent_mapping(em_tree, start, len);
3983 if (em)
3984 em->bdev = root->fs_info->fs_devices->latest_bdev;
3985 spin_unlock(&em_tree->lock);
3988 if (em->start > start || em->start + em->len <= start)
3989 free_extent_map(em);
3990 else if (em->block_start == EXTENT_MAP_INLINE && page)
3991 free_extent_map(em);
3995 em = alloc_extent_map(GFP_NOFS);
4000 em->bdev = root->fs_info->fs_devices->latest_bdev;
4001 em->start = EXTENT_MAP_HOLE;
4002 em->orig_start = EXTENT_MAP_HOLE;
4004 em->block_len = (u64)-1;
4007 path = btrfs_alloc_path();
4011 ret = btrfs_lookup_file_extent(trans, root, path,
4012 objectid, start, trans != NULL);
4019 if (path->slots[0] == 0)
4024 leaf = path->nodes[0];
4025 item = btrfs_item_ptr(leaf, path->slots[0],
4026 struct btrfs_file_extent_item);
4027 /* are we inside the extent that was found? */
4028 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4029 found_type = btrfs_key_type(&found_key);
4030 if (found_key.objectid != objectid ||
4031 found_type != BTRFS_EXTENT_DATA_KEY) {
4035 found_type = btrfs_file_extent_type(leaf, item);
4036 extent_start = found_key.offset;
4037 compressed = btrfs_file_extent_compression(leaf, item);
4038 if (found_type == BTRFS_FILE_EXTENT_REG ||
4039 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4040 extent_end = extent_start +
4041 btrfs_file_extent_num_bytes(leaf, item);
4042 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4044 size = btrfs_file_extent_inline_len(leaf, item);
4045 extent_end = (extent_start + size + root->sectorsize - 1) &
4046 ~((u64)root->sectorsize - 1);
4049 if (start >= extent_end) {
4051 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4052 ret = btrfs_next_leaf(root, path);
4059 leaf = path->nodes[0];
4061 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4062 if (found_key.objectid != objectid ||
4063 found_key.type != BTRFS_EXTENT_DATA_KEY)
4065 if (start + len <= found_key.offset)
4068 em->len = found_key.offset - start;
4072 if (found_type == BTRFS_FILE_EXTENT_REG ||
4073 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4074 em->start = extent_start;
4075 em->len = extent_end - extent_start;
4076 em->orig_start = extent_start -
4077 btrfs_file_extent_offset(leaf, item);
4078 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4080 em->block_start = EXTENT_MAP_HOLE;
4084 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4085 em->block_start = bytenr;
4086 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4089 bytenr += btrfs_file_extent_offset(leaf, item);
4090 em->block_start = bytenr;
4091 em->block_len = em->len;
4092 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4093 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4096 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4100 size_t extent_offset;
4103 em->block_start = EXTENT_MAP_INLINE;
4104 if (!page || create) {
4105 em->start = extent_start;
4106 em->len = extent_end - extent_start;
4110 size = btrfs_file_extent_inline_len(leaf, item);
4111 extent_offset = page_offset(page) + pg_offset - extent_start;
4112 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4113 size - extent_offset);
4114 em->start = extent_start + extent_offset;
4115 em->len = (copy_size + root->sectorsize - 1) &
4116 ~((u64)root->sectorsize - 1);
4117 em->orig_start = EXTENT_MAP_INLINE;
4119 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4120 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4121 if (create == 0 && !PageUptodate(page)) {
4122 if (btrfs_file_extent_compression(leaf, item) ==
4123 BTRFS_COMPRESS_ZLIB) {
4124 ret = uncompress_inline(path, inode, page,
4126 extent_offset, item);
4130 read_extent_buffer(leaf, map + pg_offset, ptr,
4134 flush_dcache_page(page);
4135 } else if (create && PageUptodate(page)) {
4138 free_extent_map(em);
4140 btrfs_release_path(root, path);
4141 trans = btrfs_join_transaction(root, 1);
4145 write_extent_buffer(leaf, map + pg_offset, ptr,
4148 btrfs_mark_buffer_dirty(leaf);
4150 set_extent_uptodate(io_tree, em->start,
4151 extent_map_end(em) - 1, GFP_NOFS);
4154 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4161 em->block_start = EXTENT_MAP_HOLE;
4162 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4164 btrfs_release_path(root, path);
4165 if (em->start > start || extent_map_end(em) <= start) {
4166 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4167 "[%llu %llu]\n", (unsigned long long)em->start,
4168 (unsigned long long)em->len,
4169 (unsigned long long)start,
4170 (unsigned long long)len);
4176 spin_lock(&em_tree->lock);
4177 ret = add_extent_mapping(em_tree, em);
4178 /* it is possible that someone inserted the extent into the tree
4179 * while we had the lock dropped. It is also possible that
4180 * an overlapping map exists in the tree
4182 if (ret == -EEXIST) {
4183 struct extent_map *existing;
4187 existing = lookup_extent_mapping(em_tree, start, len);
4188 if (existing && (existing->start > start ||
4189 existing->start + existing->len <= start)) {
4190 free_extent_map(existing);
4194 existing = lookup_extent_mapping(em_tree, em->start,
4197 err = merge_extent_mapping(em_tree, existing,
4200 free_extent_map(existing);
4202 free_extent_map(em);
4207 free_extent_map(em);
4211 free_extent_map(em);
4216 spin_unlock(&em_tree->lock);
4219 btrfs_free_path(path);
4221 ret = btrfs_end_transaction(trans, root);
4226 free_extent_map(em);
4228 return ERR_PTR(err);
4233 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4234 const struct iovec *iov, loff_t offset,
4235 unsigned long nr_segs)
4240 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4241 __u64 start, __u64 len)
4243 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4246 int btrfs_readpage(struct file *file, struct page *page)
4248 struct extent_io_tree *tree;
4249 tree = &BTRFS_I(page->mapping->host)->io_tree;
4250 return extent_read_full_page(tree, page, btrfs_get_extent);
4253 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4255 struct extent_io_tree *tree;
4258 if (current->flags & PF_MEMALLOC) {
4259 redirty_page_for_writepage(wbc, page);
4263 tree = &BTRFS_I(page->mapping->host)->io_tree;
4264 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4267 int btrfs_writepages(struct address_space *mapping,
4268 struct writeback_control *wbc)
4270 struct extent_io_tree *tree;
4272 tree = &BTRFS_I(mapping->host)->io_tree;
4273 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4277 btrfs_readpages(struct file *file, struct address_space *mapping,
4278 struct list_head *pages, unsigned nr_pages)
4280 struct extent_io_tree *tree;
4281 tree = &BTRFS_I(mapping->host)->io_tree;
4282 return extent_readpages(tree, mapping, pages, nr_pages,
4285 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4287 struct extent_io_tree *tree;
4288 struct extent_map_tree *map;
4291 tree = &BTRFS_I(page->mapping->host)->io_tree;
4292 map = &BTRFS_I(page->mapping->host)->extent_tree;
4293 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4295 ClearPagePrivate(page);
4296 set_page_private(page, 0);
4297 page_cache_release(page);
4302 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4304 if (PageWriteback(page) || PageDirty(page))
4306 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4309 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4311 struct extent_io_tree *tree;
4312 struct btrfs_ordered_extent *ordered;
4313 u64 page_start = page_offset(page);
4314 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4316 wait_on_page_writeback(page);
4317 tree = &BTRFS_I(page->mapping->host)->io_tree;
4318 if (offset) {
4319 btrfs_releasepage(page, GFP_NOFS);
4320 return;
4321 }
4323 lock_extent(tree, page_start, page_end, GFP_NOFS);
4324 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4328 * IO on this page will never be started, so we need
4329 * to account for any ordered extents now
4331 clear_extent_bit(tree, page_start, page_end,
4332 EXTENT_DIRTY | EXTENT_DELALLOC |
4333 EXTENT_LOCKED, 1, 0, GFP_NOFS);
4334 btrfs_finish_ordered_io(page->mapping->host,
4335 page_start, page_end);
4336 btrfs_put_ordered_extent(ordered);
4337 lock_extent(tree, page_start, page_end, GFP_NOFS);
4339 clear_extent_bit(tree, page_start, page_end,
4340 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4343 __btrfs_releasepage(page, GFP_NOFS);
4345 ClearPageChecked(page);
4346 if (PagePrivate(page)) {
4347 ClearPagePrivate(page);
4348 set_page_private(page, 0);
4349 page_cache_release(page);
4354 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4355 * called from a page fault handler when a page is first dirtied. Hence we must
4356 * be careful to check for EOF conditions here. We set the page up correctly
4357 * for a written page which means we get ENOSPC checking when writing into
4358 * holes and correct delalloc and unwritten extent mapping on filesystems that
4359 * support these features.
4361 * We are not allowed to take the i_mutex here so we have to play games to
4362 * protect against truncate races as the page could now be beyond EOF. Because
4363 * vmtruncate() writes the inode size before removing pages, once we have the
4364 * page lock we can determine safely if the page is beyond EOF. If it is not
4365  * beyond EOF, then the page is guaranteed safe against truncation until we
4366  * unlock the page.
4368 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4370 struct page *page = vmf->page;
4371 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4372 struct btrfs_root *root = BTRFS_I(inode)->root;
4373 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4374 struct btrfs_ordered_extent *ordered;
4376 unsigned long zero_start;
4382 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4383 if (ret) {
4384 if (ret == -ENOMEM)
4385 ret = VM_FAULT_OOM;
4386 else /* -ENOSPC, -EIO, etc */
4387 ret = VM_FAULT_SIGBUS;
4391 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4394 size = i_size_read(inode);
4395 page_start = page_offset(page);
4396 page_end = page_start + PAGE_CACHE_SIZE - 1;
4398 if ((page->mapping != inode->i_mapping) ||
4399 (page_start >= size)) {
4400 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4401 /* page got truncated out from underneath us */
4404 wait_on_page_writeback(page);
4406 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4407 set_page_extent_mapped(page);
4410 * we can't set the delalloc bits if there are pending ordered
4411 * extents. Drop our locks and wait for them to finish
4413 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4415 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4417 btrfs_start_ordered_extent(inode, ordered, 1);
4418 btrfs_put_ordered_extent(ordered);
4422 btrfs_set_extent_delalloc(inode, page_start, page_end);
4425 /* page is wholly or partially inside EOF */
4426 if (page_start + PAGE_CACHE_SIZE > size)
4427 zero_start = size & ~PAGE_CACHE_MASK;
4429 zero_start = PAGE_CACHE_SIZE;
4431 if (zero_start != PAGE_CACHE_SIZE) {
4432 kaddr = kmap(page);
4433 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4434 flush_dcache_page(page);
4437 ClearPageChecked(page);
4438 set_page_dirty(page);
4440 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
4441 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4449 static void btrfs_truncate(struct inode *inode)
4451 struct btrfs_root *root = BTRFS_I(inode)->root;
4453 struct btrfs_trans_handle *trans;
4455 u64 mask = root->sectorsize - 1;
4457 if (!S_ISREG(inode->i_mode))
4459 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4462 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4463 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4465 trans = btrfs_start_transaction(root, 1);
4468 * setattr is responsible for setting the ordered_data_close flag,
4469 * but that is only tested during the last file release. That
4470 * could happen well after the next commit, leaving a great big
4471 * window where new writes may get lost if someone chooses to write
4472 * to this file after truncating to zero
4474 * The inode doesn't have any dirty data here, and so if we commit
4475 * this is a noop. If someone immediately starts writing to the inode
4476 * it is very likely we'll catch some of their writes in this
4477 * transaction, and the commit will find this file on the ordered
4478 * data list with good things to send down.
4480 * This is a best effort solution, there is still a window where
4481 * using truncate to replace the contents of the file will
4482 * end up with a zero length file after a crash.
4484 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
4485 btrfs_add_ordered_operation(trans, root, inode);
4487 btrfs_set_trans_block_group(trans, inode);
4488 btrfs_i_size_write(inode, inode->i_size);
4490 ret = btrfs_orphan_add(trans, inode);
4493 /* FIXME, add redo link to tree so we don't leak on crash */
4494 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4495 BTRFS_EXTENT_DATA_KEY);
4496 btrfs_update_inode(trans, root, inode);
4498 ret = btrfs_orphan_del(trans, inode);
4502 nr = trans->blocks_used;
4503 ret = btrfs_end_transaction_throttle(trans, root);
4505 btrfs_btree_balance_dirty(root, nr);
4509 * create a new subvolume directory/inode (helper for the ioctl).
4511 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4512 struct btrfs_root *new_root, struct dentry *dentry,
4513 u64 new_dirid, u64 alloc_hint)
4515 struct inode *inode;
4519 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4520 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4522 return PTR_ERR(inode);
4523 inode->i_op = &btrfs_dir_inode_operations;
4524 inode->i_fop = &btrfs_dir_file_operations;
4527 btrfs_i_size_write(inode, 0);
4529 error = btrfs_update_inode(trans, new_root, inode);
4533 d_instantiate(dentry, inode);
4537 /* helper function for file defrag and space balancing. This
4538 * forces readahead on a given range of bytes in an inode
4540 unsigned long btrfs_force_ra(struct address_space *mapping,
4541 struct file_ra_state *ra, struct file *file,
4542 pgoff_t offset, pgoff_t last_index)
4544 pgoff_t req_size = last_index - offset + 1;
4546 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4547 return offset + req_size;
4550 struct inode *btrfs_alloc_inode(struct super_block *sb)
4552 struct btrfs_inode *ei;
4554 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4558 ei->logged_trans = 0;
4559 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4560 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4561 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4562 INIT_LIST_HEAD(&ei->i_orphan);
4563 INIT_LIST_HEAD(&ei->ordered_operations);
4564 return &ei->vfs_inode;
4567 void btrfs_destroy_inode(struct inode *inode)
4569 struct btrfs_ordered_extent *ordered;
4570 struct btrfs_root *root = BTRFS_I(inode)->root;
4572 WARN_ON(!list_empty(&inode->i_dentry));
4573 WARN_ON(inode->i_data.nrpages);
4575 if (BTRFS_I(inode)->i_acl &&
4576 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4577 posix_acl_release(BTRFS_I(inode)->i_acl);
4578 if (BTRFS_I(inode)->i_default_acl &&
4579 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4580 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4583  * Make sure we're properly removed from the ordered operation
4584  * lists.
4587 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
4588 spin_lock(&root->fs_info->ordered_extent_lock);
4589 list_del_init(&BTRFS_I(inode)->ordered_operations);
4590 spin_unlock(&root->fs_info->ordered_extent_lock);
4593 spin_lock(&root->list_lock);
4594 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4595 printk(KERN_ERR "BTRFS: inode %lu still on the orphan"
4596 " list\n", inode->i_ino);
4599 spin_unlock(&root->list_lock);
4602 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4606 printk(KERN_ERR "btrfs found ordered "
4607 "extent %llu %llu on inode cleanup\n",
4608 (unsigned long long)ordered->file_offset,
4609 (unsigned long long)ordered->len);
4610 btrfs_remove_ordered_extent(inode, ordered);
4611 btrfs_put_ordered_extent(ordered);
4612 btrfs_put_ordered_extent(ordered);
4615 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4616 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4619 static void init_once(void *foo)
4621 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4623 inode_init_once(&ei->vfs_inode);
void btrfs_destroy_cachep(void)
{
	if (btrfs_inode_cachep)
		kmem_cache_destroy(btrfs_inode_cachep);
	if (btrfs_trans_handle_cachep)
		kmem_cache_destroy(btrfs_trans_handle_cachep);
	if (btrfs_transaction_cachep)
		kmem_cache_destroy(btrfs_transaction_cachep);
	if (btrfs_bit_radix_cachep)
		kmem_cache_destroy(btrfs_bit_radix_cachep);
	if (btrfs_path_cachep)
		kmem_cache_destroy(btrfs_path_cachep);
}
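/*
 * small wrapper around kmem_cache_create that adds the slab flags every
 * btrfs cache wants (reclaim accounting and memory spreading) to any
 * caller-supplied extra flags.
 */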
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
			      SLAB_MEM_SPREAD | extra_flags), ctor);
}
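/*
 * create the slab caches used for inodes, transaction handles,
 * transactions, paths and the bit radix.  If any allocation fails,
 * whatever was already created is torn down again.
 */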
int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
					  sizeof(struct btrfs_inode), 0,
					  init_once);
	if (!btrfs_inode_cachep)
		goto fail;
	btrfs_trans_handle_cachep =
			btrfs_cache_create("btrfs_trans_handle_cache",
					   sizeof(struct btrfs_trans_handle),
					   0, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;
	btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
					     sizeof(struct btrfs_transaction),
					     0, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;
	btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
					 sizeof(struct btrfs_path),
					 0, NULL);
	if (!btrfs_path_cachep)
		goto fail;
	btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
						    SLAB_DESTROY_BY_RCU, NULL);
	if (!btrfs_bit_radix_cachep)
		goto fail;
	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
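/*
 * stat an inode.  st_dev comes from the root's anonymous super block so
 * each subvolume reports its own device, and bytes still in delalloc are
 * added to the block count so dirty data that hasn't hit disk yet still
 * shows up.
 */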
static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;

	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (inode_get_bytes(inode) +
			BTRFS_I(inode)->delalloc_bytes) >> 9;
	return 0;
}
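/*
 * rename one entry to another name, within a single subvolume only.
 * Renames of subvolume roots aren't supported yet, non-empty directory
 * targets are rejected, and the tree log is pinned across the
 * unlink/link pair so a crash leaves the inode under either the old
 * name or the new one.
 */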
4693 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4694 struct inode *new_dir, struct dentry *new_dentry)
4696 struct btrfs_trans_handle *trans;
4697 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4698 struct inode *new_inode = new_dentry->d_inode;
4699 struct inode *old_inode = old_dentry->d_inode;
4700 struct timespec ctime = CURRENT_TIME;
	/* we're not allowed to rename between subvolumes */
	if (BTRFS_I(old_inode)->root->root_key.objectid !=
	    BTRFS_I(new_dir)->root->root_key.objectid)
		return -EXDEV;

	/* the target of the rename must be empty, if it is a directory */
	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
4714 /* to rename a snapshot or subvolume, we need to juggle the
4715 * backrefs. This isn't coded yet
4717 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4720 ret = btrfs_check_metadata_free_space(root);
	/*
	 * we're using rename to replace one file with another, and the
	 * replacement file is large.  Start IO on it now so we don't add
	 * too much work to the end of the transaction
	 */
4729 if (new_inode && old_inode && S_ISREG(old_inode->i_mode) &&
4730 new_inode->i_size &&
4731 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
4732 filemap_flush(old_inode->i_mapping);
4734 trans = btrfs_start_transaction(root, 1);
	/* make sure the inode gets flushed if it is replacing something */
4740 if (new_inode && new_inode->i_size &&
4741 old_inode && S_ISREG(old_inode->i_mode)) {
4742 btrfs_add_ordered_operation(trans, root, old_inode);
4746 * this is an ugly little race, but the rename is required to make
4747 * sure that if we crash, the inode is either at the old name
4748 * or the new one. pinning the log transaction lets us make sure
4749 * we don't allow a log commit to come in after we unlink the
4750 * name but before we add the new name back in.
4752 btrfs_pin_log_trans(root);
4754 btrfs_set_trans_block_group(trans, new_dir);
4756 btrfs_inc_nlink(old_dentry->d_inode);
4757 old_dir->i_ctime = old_dir->i_mtime = ctime;
4758 new_dir->i_ctime = new_dir->i_mtime = ctime;
4759 old_inode->i_ctime = ctime;
4761 if (old_dentry->d_parent != new_dentry->d_parent)
4762 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
4764 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4765 old_dentry->d_name.name,
4766 old_dentry->d_name.len);
4771 new_inode->i_ctime = CURRENT_TIME;
4772 ret = btrfs_unlink_inode(trans, root, new_dir,
4773 new_dentry->d_inode,
4774 new_dentry->d_name.name,
4775 new_dentry->d_name.len);
4778 if (new_inode->i_nlink == 0) {
4779 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4785 ret = btrfs_set_inode_index(new_dir, &index);
4789 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4790 old_inode, new_dentry->d_name.name,
4791 new_dentry->d_name.len, 1, index);
4795 btrfs_log_new_name(trans, old_inode, old_dir,
4796 new_dentry->d_parent);
4799 /* this btrfs_end_log_trans just allows the current
4800 * log-sub transaction to complete
4802 btrfs_end_log_trans(root);
4803 btrfs_end_transaction_throttle(trans, root);
4809 * some fairly slow code that needs optimization. This walks the list
4810 * of all the inodes with pending delalloc and forces them to disk.
4812 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4814 struct list_head *head = &root->fs_info->delalloc_inodes;
4815 struct btrfs_inode *binode;
4816 struct inode *inode;
4818 if (root->fs_info->sb->s_flags & MS_RDONLY)
4821 spin_lock(&root->fs_info->delalloc_lock);
4822 while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
4825 inode = igrab(&binode->vfs_inode);
4827 list_del_init(&binode->delalloc_inodes);
4828 spin_unlock(&root->fs_info->delalloc_lock);
4830 filemap_flush(inode->i_mapping);
4834 spin_lock(&root->fs_info->delalloc_lock);
4836 spin_unlock(&root->fs_info->delalloc_lock);
4838 /* the filemap_flush will queue IO into the worker threads, but
4839 * we have to make sure the IO is actually started and that
4840 * ordered extents get created before we return
4842 atomic_inc(&root->fs_info->async_submit_draining);
4843 while (atomic_read(&root->fs_info->nr_async_submits) ||
4844 atomic_read(&root->fs_info->async_delalloc_pages)) {
4845 wait_event(root->fs_info->async_submit_wait,
4846 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
4847 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
4849 atomic_dec(&root->fs_info->async_submit_draining);
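/*
 * create a symlink.  The target path is written into the btree as an
 * inline file extent on the new inode, which is why targets longer than
 * BTRFS_MAX_INLINE_DATA_SIZE(root) are refused with -ENAMETOOLONG.
 */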
4853 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4854 const char *symname)
4856 struct btrfs_trans_handle *trans;
4857 struct btrfs_root *root = BTRFS_I(dir)->root;
4858 struct btrfs_path *path;
4859 struct btrfs_key key;
4860 struct inode *inode = NULL;
4868 struct btrfs_file_extent_item *ei;
4869 struct extent_buffer *leaf;
4870 unsigned long nr = 0;
4872 name_len = strlen(symname) + 1;
4873 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4874 return -ENAMETOOLONG;
4876 err = btrfs_check_metadata_free_space(root);
4880 trans = btrfs_start_transaction(root, 1);
4881 btrfs_set_trans_block_group(trans, dir);
4883 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4889 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4891 dentry->d_parent->d_inode->i_ino, objectid,
4892 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4894 err = PTR_ERR(inode);
4898 err = btrfs_init_inode_security(inode, dir);
4904 btrfs_set_trans_block_group(trans, inode);
4905 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4909 inode->i_mapping->a_ops = &btrfs_aops;
4910 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4911 inode->i_fop = &btrfs_file_operations;
4912 inode->i_op = &btrfs_file_inode_operations;
4913 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4915 dir->i_sb->s_dirt = 1;
4916 btrfs_update_inode_block_group(trans, inode);
4917 btrfs_update_inode_block_group(trans, dir);
4921 path = btrfs_alloc_path();
4923 key.objectid = inode->i_ino;
4925 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
4926 datasize = btrfs_file_extent_calc_inline_size(name_len);
4927 err = btrfs_insert_empty_item(trans, root, path, &key,
4933 leaf = path->nodes[0];
4934 ei = btrfs_item_ptr(leaf, path->slots[0],
4935 struct btrfs_file_extent_item);
4936 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
4937 btrfs_set_file_extent_type(leaf, ei,
4938 BTRFS_FILE_EXTENT_INLINE);
4939 btrfs_set_file_extent_encryption(leaf, ei, 0);
4940 btrfs_set_file_extent_compression(leaf, ei, 0);
4941 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
4942 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
4944 ptr = btrfs_file_extent_inline_start(ei);
4945 write_extent_buffer(leaf, symname, ptr, name_len);
4946 btrfs_mark_buffer_dirty(leaf);
4947 btrfs_free_path(path);
4949 inode->i_op = &btrfs_symlink_inode_operations;
4950 inode->i_mapping->a_ops = &btrfs_symlink_aops;
4951 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4952 inode_set_bytes(inode, name_len);
4953 btrfs_i_size_write(inode, name_len - 1);
4954 err = btrfs_update_inode(trans, root, inode);
4959 nr = trans->blocks_used;
4960 btrfs_end_transaction_throttle(trans, root);
4963 inode_dec_link_count(inode);
4966 btrfs_btree_balance_dirty(root, nr);
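/*
 * helper for fallocate: allocate extents covering [start, end) and
 * record them as BTRFS_FILE_EXTENT_PREALLOC items.  i_size is only
 * pushed out when FALLOC_FL_KEEP_SIZE wasn't requested.
 */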
4970 static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
4971 u64 alloc_hint, int mode)
4973 struct btrfs_trans_handle *trans;
4974 struct btrfs_root *root = BTRFS_I(inode)->root;
4975 struct btrfs_key ins;
4977 u64 cur_offset = start;
4978 u64 num_bytes = end - start;
4981 trans = btrfs_join_transaction(root, 1);
4983 btrfs_set_trans_block_group(trans, inode);
4985 while (num_bytes > 0) {
4986 alloc_size = min(num_bytes, root->fs_info->max_extent);
4987 ret = btrfs_reserve_extent(trans, root, alloc_size,
4988 root->sectorsize, 0, alloc_hint,
4994 ret = insert_reserved_file_extent(trans, inode,
4995 cur_offset, ins.objectid,
4996 ins.offset, ins.offset,
4997 ins.offset, 0, 0, 0,
4998 BTRFS_FILE_EXTENT_PREALLOC);
5000 num_bytes -= ins.offset;
5001 cur_offset += ins.offset;
5002 alloc_hint = ins.objectid + ins.offset;
5005 if (cur_offset > start) {
5006 inode->i_ctime = CURRENT_TIME;
5007 btrfs_set_flag(inode, PREALLOC);
5008 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5009 cur_offset > i_size_read(inode))
5010 btrfs_i_size_write(inode, cur_offset);
5011 ret = btrfs_update_inode(trans, root, inode);
5015 btrfs_end_transaction(trans, root);
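/*
 * preallocate space for a file (the ->fallocate hook).  The requested
 * range is rounded out to sector boundaries, overlapping ordered extents
 * are waited on while the io_tree range is locked, and every hole found
 * in the extent map is filled via prealloc_file_range.
 */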
5019 static long btrfs_fallocate(struct inode *inode, int mode,
5020 loff_t offset, loff_t len)
5027 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5028 struct extent_map *em;
5031 alloc_start = offset & ~mask;
5032 alloc_end = (offset + len + mask) & ~mask;
5034 mutex_lock(&inode->i_mutex);
5035 if (alloc_start > inode->i_size) {
5036 ret = btrfs_cont_expand(inode, alloc_start);
5042 struct btrfs_ordered_extent *ordered;
5043 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
5044 alloc_end - 1, GFP_NOFS);
5045 ordered = btrfs_lookup_first_ordered_extent(inode,
5048 ordered->file_offset + ordered->len > alloc_start &&
5049 ordered->file_offset < alloc_end) {
5050 btrfs_put_ordered_extent(ordered);
5051 unlock_extent(&BTRFS_I(inode)->io_tree,
5052 alloc_start, alloc_end - 1, GFP_NOFS);
5053 btrfs_wait_ordered_range(inode, alloc_start,
5054 alloc_end - alloc_start);
5057 btrfs_put_ordered_extent(ordered);
5062 cur_offset = alloc_start;
5064 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5065 alloc_end - cur_offset, 0);
5066 BUG_ON(IS_ERR(em) || !em);
5067 last_byte = min(extent_map_end(em), alloc_end);
5068 last_byte = (last_byte + mask) & ~mask;
5069 if (em->block_start == EXTENT_MAP_HOLE) {
5070 ret = prealloc_file_range(inode, cur_offset,
5071 last_byte, alloc_hint, mode);
5073 free_extent_map(em);
5077 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5078 alloc_hint = em->block_start;
5079 free_extent_map(em);
5081 cur_offset = last_byte;
5082 if (cur_offset >= alloc_end) {
5087 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
5090 mutex_unlock(&inode->i_mutex);
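/*
 * btrfs data pages never carry buffer heads, so dirtying a page only
 * needs the nobuffers variant.
 */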
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
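/*
 * permission check: on top of the generic mode/ACL checks, writes are
 * refused for inodes flagged READONLY.
 */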
static int btrfs_permission(struct inode *inode, int mask)
{
	if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, btrfs_check_acl);
}
5106 static struct inode_operations btrfs_dir_inode_operations = {
5107 .getattr = btrfs_getattr,
5108 .lookup = btrfs_lookup,
5109 .create = btrfs_create,
5110 .unlink = btrfs_unlink,
5112 .mkdir = btrfs_mkdir,
5113 .rmdir = btrfs_rmdir,
5114 .rename = btrfs_rename,
5115 .symlink = btrfs_symlink,
5116 .setattr = btrfs_setattr,
5117 .mknod = btrfs_mknod,
5118 .setxattr = btrfs_setxattr,
5119 .getxattr = btrfs_getxattr,
5120 .listxattr = btrfs_listxattr,
5121 .removexattr = btrfs_removexattr,
	.permission	= btrfs_permission,
};
5124 static struct inode_operations btrfs_dir_ro_inode_operations = {
5125 .lookup = btrfs_lookup,
	.permission	= btrfs_permission,
};
5128 static struct file_operations btrfs_dir_file_operations = {
5129 .llseek = generic_file_llseek,
5130 .read = generic_read_dir,
5131 .readdir = btrfs_real_readdir,
5132 .unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};
5140 static struct extent_io_ops btrfs_extent_io_ops = {
5141 .fill_delalloc = run_delalloc_range,
5142 .submit_bio_hook = btrfs_submit_bio_hook,
5143 .merge_bio_hook = btrfs_merge_bio_hook,
5144 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
5145 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
5146 .writepage_start_hook = btrfs_writepage_start_hook,
5147 .readpage_io_failed_hook = btrfs_io_failed_hook,
5148 .set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
};
5153 * btrfs doesn't support the bmap operation because swapfiles
5154 * use bmap to make a mapping of extents in the file. They assume
5155 * these extents won't change over the life of the file and they
5156 * use the bmap result to do IO directly to the drive.
5158 * the btrfs bmap call would return logical addresses that aren't
5159 * suitable for IO and they also will change frequently as COW
5160 * operations happen. So, swapfile + btrfs == corruption.
5162 * For now we're avoiding this by dropping bmap.
5164 static struct address_space_operations btrfs_aops = {
5165 .readpage = btrfs_readpage,
5166 .writepage = btrfs_writepage,
5167 .writepages = btrfs_writepages,
5168 .readpages = btrfs_readpages,
5169 .sync_page = block_sync_page,
5170 .direct_IO = btrfs_direct_IO,
5171 .invalidatepage = btrfs_invalidatepage,
5172 .releasepage = btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
};
5176 static struct address_space_operations btrfs_symlink_aops = {
5177 .readpage = btrfs_readpage,
5178 .writepage = btrfs_writepage,
5179 .invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};
5183 static struct inode_operations btrfs_file_inode_operations = {
5184 .truncate = btrfs_truncate,
5185 .getattr = btrfs_getattr,
5186 .setattr = btrfs_setattr,
5187 .setxattr = btrfs_setxattr,
5188 .getxattr = btrfs_getxattr,
5189 .listxattr = btrfs_listxattr,
5190 .removexattr = btrfs_removexattr,
5191 .permission = btrfs_permission,
5192 .fallocate = btrfs_fallocate,
	.fiemap		= btrfs_fiemap,
};
5195 static struct inode_operations btrfs_special_inode_operations = {
5196 .getattr = btrfs_getattr,
5197 .setattr = btrfs_setattr,
5198 .permission = btrfs_permission,
5199 .setxattr = btrfs_setxattr,
5200 .getxattr = btrfs_getxattr,
5201 .listxattr = btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};
5204 static struct inode_operations btrfs_symlink_inode_operations = {
5205 .readlink = generic_readlink,
5206 .follow_link = page_follow_link_light,
5207 .put_link = page_put_link,
5208 .permission = btrfs_permission,
5209 .setxattr = btrfs_setxattr,
5210 .getxattr = btrfs_getxattr,
5211 .listxattr = btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};