/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"
static int update_reserved_extents(struct btrfs_root *root,
				   u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		atomic_inc(&ret->count);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
/*
 * We always set EXTENT_LOCKED for the super mirror extents so we don't
 * overwrite them, so those bits need to be unset.  Also, if we are unmounting
 * with pinned extents still sitting there because block group caching was in
 * progress, we need to clear those now, since we are done.
 */
void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
{
	u64 start, end, last = 0;
	int ret;

	while (1) {
		ret = find_first_extent_bit(&info->pinned_extents, last,
					    &start, &end,
					    EXTENT_LOCKED|EXTENT_DIRTY);
		if (ret)
			break;

		clear_extent_bits(&info->pinned_extents, start, end,
				  EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
		last = end + 1;
	}
}
static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);
		while (nr--) {
			try_lock_extent(&fs_info->pinned_extents,
					logical[nr],
					logical[nr] + stripe_len - 1,
					GFP_NOFS);
		}
		kfree(logical);
	}

	return 0;
}
/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, since their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY|EXTENT_LOCKED);
		if (ret)
			break;

		if (extent_start == start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}
static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 last = 0;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	u64 total_found = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	atomic_inc(&block_group->space_info->caching_threads);
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
again:
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		smp_mb();
		if (block_group->fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(fs_info->extent_root, path);
			if (ret < 0)
				goto err;
			else if (ret)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(fs_info->extent_root, path);
				up_read(&fs_info->extent_commit_sem);
				schedule_timeout(1);
				goto again;
			}

			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid)
			goto next;

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;
		}

		if (total_found > (1024 * 1024 * 2)) {
			total_found = 0;
			wake_up(&block_group->caching_q);
		}
next:
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);

	spin_lock(&block_group->lock);
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);
	atomic_dec(&block_group->space_info->caching_threads);
	wake_up(&block_group->caching_q);

	return 0;
}
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
	struct task_struct *tsk;
	int ret = 0;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		return ret;
	}
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count))
		kfree(cache);
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags == flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
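/*
 * Editor's note, not in the original source: a quick worked example of
 * div_factor().  With factor == 9, div_factor(1073741824, 9) == 966367641,
 * i.e. ~90% of a 1GB block group; btrfs_find_block_group() below uses
 * exactly this as its "mostly full" threshold.
 */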
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees; the location of the tree block is
 * recorded in the back refs.  Full back refs are actually generic, and
 * can be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead: every time a tree
 * block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction,
 * the only way to drop a reference to it is to COW it.  So we can detect
 * the event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer
 * in the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree block.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required.  This information is stored in
 * the tree block info structure.
 */
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}
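/*
 * Editor's sketch, not in the original source: how the two kinds of data
 * back ref keys described in the comment block above are composed.  The
 * bytenr, inode objectid, file offset and leaf address below are all
 * hypothetical, made-up values.
 */
static inline void example_compose_data_ref_keys(struct btrfs_key *implicit,
						 struct btrfs_key *full)
{
	u64 bytenr = 136708096;		/* hypothetical extent start */
	u64 leaf_start = 30408704;	/* hypothetical parent leaf */

	/* implicit back ref: offset is a hash of (root, inode, offset) */
	implicit->objectid = bytenr;
	implicit->type = BTRFS_EXTENT_DATA_REF_KEY;
	implicit->offset = hash_extent_data_ref(BTRFS_FS_TREE_OBJECTID,
						257, 0);

	/* full back ref: offset is the first byte of the parent block */
	full->objectid = bytenr;
	full->type = BTRFS_SHARED_DATA_REF_KEY;
	full->offset = leaf_start;
}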
static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref,
							refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref,
							refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
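/*
 * Editor's note, not in the original source: the mapping implemented by
 * extent_ref_type() above, in table form:
 *
 *	tree block (owner < FIRST_FREE), parent set	-> SHARED_BLOCK_REF
 *	tree block (owner < FIRST_FREE), no parent	-> TREE_BLOCK_REF
 *	data extent (owner >= FIRST_FREE), parent set	-> SHARED_DATA_REF
 *	data extent (owner >= FIRST_FREE), no parent	-> EXTENT_DATA_REF
 */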
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
#ifdef BIO_RW_DISCARD
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
}
#endif

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
#ifdef BIO_RW_DISCARD
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
#else
	return 0;
#endif
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will set up the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
		update_reserved_extents(root, ins.objectid, ins.offset, 0);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
		update_reserved_extents(root, ins.objectid, ins.offset, 0);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
			btrfs_update_pinned_extents(root, node->bytenr,
						    node->num_bytes, 1);
			update_reserved_extents(root, node->bytenr,
						node->num_bytes, 0);
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero while
	 * there are still pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
				rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed.  Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
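/*
 * Editor's sketch, not in the original source: a typical caller pattern for
 * btrfs_run_delayed_refs().  Transaction commit flushes everything with
 * (unsigned long)-1, while other callers throttle with a finite count.
 * The batch size of 64 here is a made-up example value.
 */
static inline int example_flush_delayed_refs(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root)
{
	int ret;

	/* process a bounded batch first, then everything that remains */
	ret = btrfs_run_delayed_refs(trans, root, 64);
	if (ret)
		return ret;
	return btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
}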
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct extent_buffer *buf, u32 nr_extents)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 root_gen;
	u32 nritems;
	int i;
	int level;
	int ret = 0;
	int shared = 0;

	if (!root->ref_cows)
		return 0;

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		shared = 0;
		root_gen = root->root_key.offset;
	} else {
		shared = 1;
		root_gen = trans->transid - 1;
	}

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_extents);
		if (!ref) {
			ret = -ENOMEM;
			goto out;
		}

		ref->root_gen = root_gen;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_extents;
		info = ref->extents;

		for (i = 0; nr_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		ret = btrfs_add_leaf_ref(root, ref, shared);
		if (ret == -EEXIST && shared) {
			struct btrfs_leaf_ref *old;
			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
			BUG_ON(!old);
			btrfs_remove_leaf_ref(root, old);
			btrfs_free_leaf_ref(root, old);
			ret = btrfs_add_leaf_ref(root, ref, shared);
		}
		btrfs_free_leaf_ref(root, ref);
	}
out:
	return ret;
}
/* when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
struct refsort {
	u64 bytenr;
	u32 slot;
};

/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
2453 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2454 struct btrfs_root *root,
2455 struct btrfs_path *path,
2456 struct btrfs_block_group_cache *cache)
2459 struct btrfs_root *extent_root = root->fs_info->extent_root;
2461 struct extent_buffer *leaf;
2463 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2468 leaf = path->nodes[0];
2469 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2470 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2471 btrfs_mark_buffer_dirty(leaf);
2472 btrfs_release_path(extent_root, path);
2480 static struct btrfs_block_group_cache *
2481 next_block_group(struct btrfs_root *root,
2482 struct btrfs_block_group_cache *cache)
2484 struct rb_node *node;
2485 spin_lock(&root->fs_info->block_group_cache_lock);
2486 node = rb_next(&cache->cache_node);
2487 btrfs_put_block_group(cache);
2489 cache = rb_entry(node, struct btrfs_block_group_cache,
2491 atomic_inc(&cache->count);
2494 spin_unlock(&root->fs_info->block_group_cache_lock);
2498 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2499 struct btrfs_root *root)
2501 struct btrfs_block_group_cache *cache;
2503 struct btrfs_path *path;
2506 path = btrfs_alloc_path();
2512 err = btrfs_run_delayed_refs(trans, root,
2517 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2521 cache = next_block_group(root, cache);
2531 last = cache->key.objectid + cache->key.offset;
2533 err = write_one_cache_group(trans, root, path, cache);
2535 btrfs_put_block_group(cache);
2538 btrfs_free_path(path);
2542 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2544 struct btrfs_block_group_cache *block_group;
2547 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2548 if (!block_group || block_group->ro)
2551 btrfs_put_block_group(block_group);
2555 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2556 u64 total_bytes, u64 bytes_used,
2557 struct btrfs_space_info **space_info)
2559 struct btrfs_space_info *found;
2561 found = __find_space_info(info, flags);
2563 spin_lock(&found->lock);
2564 found->total_bytes += total_bytes;
2565 found->bytes_used += bytes_used;
2567 spin_unlock(&found->lock);
2568 *space_info = found;
2571 found = kzalloc(sizeof(*found), GFP_NOFS);
2575 INIT_LIST_HEAD(&found->block_groups);
2576 init_rwsem(&found->groups_sem);
2577 spin_lock_init(&found->lock);
2578 found->flags = flags;
2579 found->total_bytes = total_bytes;
2580 found->bytes_used = bytes_used;
2581 found->bytes_pinned = 0;
2582 found->bytes_reserved = 0;
2583 found->bytes_readonly = 0;
2584 found->bytes_delalloc = 0;
2586 found->force_alloc = 0;
2587 *space_info = found;
2588 list_add_rcu(&found->list, &info->space_info);
2589 atomic_set(&found->caching_threads, 0);
2593 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2595 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2596 BTRFS_BLOCK_GROUP_RAID1 |
2597 BTRFS_BLOCK_GROUP_RAID10 |
2598 BTRFS_BLOCK_GROUP_DUP);
2600 if (flags & BTRFS_BLOCK_GROUP_DATA)
2601 fs_info->avail_data_alloc_bits |= extra_flags;
2602 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2603 fs_info->avail_metadata_alloc_bits |= extra_flags;
2604 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2605 fs_info->avail_system_alloc_bits |= extra_flags;
2609 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2611 spin_lock(&cache->space_info->lock);
2612 spin_lock(&cache->lock);
2614 cache->space_info->bytes_readonly += cache->key.offset -
2615 btrfs_block_group_used(&cache->item);
2618 spin_unlock(&cache->lock);
2619 spin_unlock(&cache->space_info->lock);
2622 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2624 u64 num_devices = root->fs_info->fs_devices->rw_devices;
2626 if (num_devices == 1)
2627 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2628 if (num_devices < 4)
2629 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2631 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2632 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2633 BTRFS_BLOCK_GROUP_RAID10))) {
2634 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2637 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2638 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2639 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2642 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2643 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2644 (flags & BTRFS_BLOCK_GROUP_RAID10) |
2645 (flags & BTRFS_BLOCK_GROUP_DUP)))
2646 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
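/*
 * illustrative example for btrfs_reduce_alloc_profile() above (not in
 * the original): on a filesystem with a single rw device, a request
 * for DATA | RAID1 reduces to plain DATA, because the RAID1/RAID0 bits
 * are stripped before the combination checks run
 */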
2650 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
2652 struct btrfs_fs_info *info = root->fs_info;
2656 alloc_profile = info->avail_data_alloc_bits &
2657 info->data_alloc_profile;
2658 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2659 } else if (root == root->fs_info->chunk_root) {
2660 alloc_profile = info->avail_system_alloc_bits &
2661 info->system_alloc_profile;
2662 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2664 alloc_profile = info->avail_metadata_alloc_bits &
2665 info->metadata_alloc_profile;
2666 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2669 return btrfs_reduce_alloc_profile(root, data);
2672 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2676 alloc_target = btrfs_get_alloc_profile(root, 1);
2677 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2682 * for now this just makes sure we have at least 5% of our metadata space free
2685 int btrfs_check_metadata_free_space(struct btrfs_root *root)
2687 struct btrfs_fs_info *info = root->fs_info;
2688 struct btrfs_space_info *meta_sinfo;
2689 u64 alloc_target, thresh;
2690 int committed = 0, ret;
2692 /* get the space info for where the metadata will live */
2693 alloc_target = btrfs_get_alloc_profile(root, 0);
2694 meta_sinfo = __find_space_info(info, alloc_target);
2697 spin_lock(&meta_sinfo->lock);
2698 if (!meta_sinfo->full)
2699 thresh = meta_sinfo->total_bytes * 80;
2701 thresh = meta_sinfo->total_bytes * 95;
2703 do_div(thresh, 100);
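/*
 * illustrative numbers: with total_bytes = 1000000 and the space info
 * not yet full, thresh = 1000000 * 80 / 100 = 800000 bytes
 */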
2705 if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2706 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
2707 struct btrfs_trans_handle *trans;
2708 if (!meta_sinfo->full) {
2709 meta_sinfo->force_alloc = 1;
2710 spin_unlock(&meta_sinfo->lock);
2712 trans = btrfs_start_transaction(root, 1);
2716 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2717 2 * 1024 * 1024, alloc_target, 0);
2718 btrfs_end_transaction(trans, root);
2721 spin_unlock(&meta_sinfo->lock);
2725 trans = btrfs_join_transaction(root, 1);
2728 ret = btrfs_commit_transaction(trans, root);
2735 spin_unlock(&meta_sinfo->lock);
2741 * This will check the space that the inode allocates from to make sure we have
2742 * enough space for bytes.
2744 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
2747 struct btrfs_space_info *data_sinfo;
2748 int ret = 0, committed = 0;
2750 /* make sure bytes are sectorsize aligned */
2751 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
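/*
 * e.g. (illustrative) with a 4096 byte sectorsize: bytes = 1 rounds up
 * to 4096, bytes = 8192 stays 8192; this assumes sectorsize is a
 * power of two
 */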
2753 data_sinfo = BTRFS_I(inode)->space_info;
2755 /* make sure we have enough space to handle the data first */
2756 spin_lock(&data_sinfo->lock);
2757 if (data_sinfo->total_bytes - data_sinfo->bytes_used -
2758 data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
2759 data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
2760 data_sinfo->bytes_may_use < bytes) {
2761 struct btrfs_trans_handle *trans;
2764 * if we don't have enough free bytes in this space then we need
2765 * to alloc a new chunk.
2767 if (!data_sinfo->full) {
2770 data_sinfo->force_alloc = 1;
2771 spin_unlock(&data_sinfo->lock);
2773 alloc_target = btrfs_get_alloc_profile(root, 1);
2774 trans = btrfs_start_transaction(root, 1);
2778 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2779 bytes + 2 * 1024 * 1024,
2781 btrfs_end_transaction(trans, root);
2786 spin_unlock(&data_sinfo->lock);
2788 /* commit the current transaction and try again */
2791 trans = btrfs_join_transaction(root, 1);
2794 ret = btrfs_commit_transaction(trans, root);
2800 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
2801 ", %llu bytes_used, %llu bytes_reserved, "
2802 "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
2803 "%llu total\n", (unsigned long long)bytes,
2804 (unsigned long long)data_sinfo->bytes_delalloc,
2805 (unsigned long long)data_sinfo->bytes_used,
2806 (unsigned long long)data_sinfo->bytes_reserved,
2807 (unsigned long long)data_sinfo->bytes_pinned,
2808 (unsigned long long)data_sinfo->bytes_readonly,
2809 (unsigned long long)data_sinfo->bytes_may_use,
2810 (unsigned long long)data_sinfo->total_bytes);
2813 data_sinfo->bytes_may_use += bytes;
2814 BTRFS_I(inode)->reserved_bytes += bytes;
2815 spin_unlock(&data_sinfo->lock);
2817 return btrfs_check_metadata_free_space(root);
2821 * if there was an error for whatever reason after calling
2822 * btrfs_check_data_free_space, call this so we can clean up the counters.
2824 void btrfs_free_reserved_data_space(struct btrfs_root *root,
2825 struct inode *inode, u64 bytes)
2827 struct btrfs_space_info *data_sinfo;
2829 /* make sure bytes are sectorsize aligned */
2830 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2832 data_sinfo = BTRFS_I(inode)->space_info;
2833 spin_lock(&data_sinfo->lock);
2834 data_sinfo->bytes_may_use -= bytes;
2835 BTRFS_I(inode)->reserved_bytes -= bytes;
2836 spin_unlock(&data_sinfo->lock);
2839 /* called when we are adding a delalloc extent to the inode's io_tree */
2840 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
2843 struct btrfs_space_info *data_sinfo;
2845 /* get the space info for where this inode will be storing its data */
2846 data_sinfo = BTRFS_I(inode)->space_info;
2848 /* make sure we have enough space to handle the data first */
2849 spin_lock(&data_sinfo->lock);
2850 data_sinfo->bytes_delalloc += bytes;
2853 * we are adding a delalloc extent without calling
2854 * btrfs_check_data_free_space first. This happens on a weird
2855 * writepage condition, but shouldn't hurt our accounting
2857 if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
2858 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
2859 BTRFS_I(inode)->reserved_bytes = 0;
2861 data_sinfo->bytes_may_use -= bytes;
2862 BTRFS_I(inode)->reserved_bytes -= bytes;
2865 spin_unlock(&data_sinfo->lock);
2868 /* called when we are clearing a delalloc extent from the inode's io_tree */
2869 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
2872 struct btrfs_space_info *info;
2874 info = BTRFS_I(inode)->space_info;
2876 spin_lock(&info->lock);
2877 info->bytes_delalloc -= bytes;
2878 spin_unlock(&info->lock);
2881 static void force_metadata_allocation(struct btrfs_fs_info *info)
2883 struct list_head *head = &info->space_info;
2884 struct btrfs_space_info *found;
2887 list_for_each_entry_rcu(found, head, list) {
2888 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2889 found->force_alloc = 1;
2894 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
2895 struct btrfs_root *extent_root, u64 alloc_bytes,
2896 u64 flags, int force)
2898 struct btrfs_space_info *space_info;
2899 struct btrfs_fs_info *fs_info = extent_root->fs_info;
2903 mutex_lock(&fs_info->chunk_mutex);
2905 flags = btrfs_reduce_alloc_profile(extent_root, flags);
2907 space_info = __find_space_info(extent_root->fs_info, flags);
2909 ret = update_space_info(extent_root->fs_info, flags,
2913 BUG_ON(!space_info);
2915 spin_lock(&space_info->lock);
2916 if (space_info->force_alloc) {
2918 space_info->force_alloc = 0;
2920 if (space_info->full) {
2921 spin_unlock(&space_info->lock);
2925 thresh = space_info->total_bytes - space_info->bytes_readonly;
2926 thresh = div_factor(thresh, 6);
2928 (space_info->bytes_used + space_info->bytes_pinned +
2929 space_info->bytes_reserved + alloc_bytes) < thresh) {
2930 spin_unlock(&space_info->lock);
2933 spin_unlock(&space_info->lock);
2936 * if we're doing a data chunk, go ahead and make sure that
2937 * we keep a reasonable number of metadata chunks allocated in the
2940 if (flags & BTRFS_BLOCK_GROUP_DATA) {
2941 fs_info->data_chunk_allocations++;
2942 if (!(fs_info->data_chunk_allocations %
2943 fs_info->metadata_ratio))
2944 force_metadata_allocation(fs_info);
2947 ret = btrfs_alloc_chunk(trans, extent_root, flags);
2949 space_info->full = 1;
2951 mutex_unlock(&extent_root->fs_info->chunk_mutex);
2955 static int update_block_group(struct btrfs_trans_handle *trans,
2956 struct btrfs_root *root,
2957 u64 bytenr, u64 num_bytes, int alloc,
2960 struct btrfs_block_group_cache *cache;
2961 struct btrfs_fs_info *info = root->fs_info;
2962 u64 total = num_bytes;
2966 /* block accounting for super block */
2967 spin_lock(&info->delalloc_lock);
2968 old_val = btrfs_super_bytes_used(&info->super_copy);
2970 old_val += num_bytes;
2972 old_val -= num_bytes;
2973 btrfs_set_super_bytes_used(&info->super_copy, old_val);
2975 /* block accounting for root item */
2976 old_val = btrfs_root_used(&root->root_item);
2978 old_val += num_bytes;
2980 old_val -= num_bytes;
2981 btrfs_set_root_used(&root->root_item, old_val);
2982 spin_unlock(&info->delalloc_lock);
2985 cache = btrfs_lookup_block_group(info, bytenr);
2988 byte_in_group = bytenr - cache->key.objectid;
2989 WARN_ON(byte_in_group > cache->key.offset);
2991 spin_lock(&cache->space_info->lock);
2992 spin_lock(&cache->lock);
2994 old_val = btrfs_block_group_used(&cache->item);
2995 num_bytes = min(total, cache->key.offset - byte_in_group);
2997 old_val += num_bytes;
2998 cache->space_info->bytes_used += num_bytes;
3000 cache->space_info->bytes_readonly -= num_bytes;
3001 btrfs_set_block_group_used(&cache->item, old_val);
3002 spin_unlock(&cache->lock);
3003 spin_unlock(&cache->space_info->lock);
3005 old_val -= num_bytes;
3006 cache->space_info->bytes_used -= num_bytes;
3008 cache->space_info->bytes_readonly += num_bytes;
3009 btrfs_set_block_group_used(&cache->item, old_val);
3010 spin_unlock(&cache->lock);
3011 spin_unlock(&cache->space_info->lock);
3015 ret = btrfs_discard_extent(root, bytenr,
3019 ret = btrfs_add_free_space(cache, bytenr,
3024 btrfs_put_block_group(cache);
3026 bytenr += num_bytes;
3031 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3033 struct btrfs_block_group_cache *cache;
3036 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3040 bytenr = cache->key.objectid;
3041 btrfs_put_block_group(cache);
3046 int btrfs_update_pinned_extents(struct btrfs_root *root,
3047 u64 bytenr, u64 num, int pin)
3050 struct btrfs_block_group_cache *cache;
3051 struct btrfs_fs_info *fs_info = root->fs_info;
3054 set_extent_dirty(&fs_info->pinned_extents,
3055 bytenr, bytenr + num - 1, GFP_NOFS);
3058 cache = btrfs_lookup_block_group(fs_info, bytenr);
3060 len = min(num, cache->key.offset -
3061 (bytenr - cache->key.objectid));
3063 spin_lock(&cache->space_info->lock);
3064 spin_lock(&cache->lock);
3065 cache->pinned += len;
3066 cache->space_info->bytes_pinned += len;
3067 spin_unlock(&cache->lock);
3068 spin_unlock(&cache->space_info->lock);
3069 fs_info->total_pinned += len;
3074 * in order to not race with the block group caching, we
3075 * only want to unpin the extent if we are cached. If
3076 * we aren't cached, we want to start async caching this
3077 * block group so we can free the extent the next time
3080 spin_lock(&cache->space_info->lock);
3081 spin_lock(&cache->lock);
3082 unpin = (cache->cached == BTRFS_CACHE_FINISHED);
3083 if (likely(unpin)) {
3084 cache->pinned -= len;
3085 cache->space_info->bytes_pinned -= len;
3086 fs_info->total_pinned -= len;
3088 spin_unlock(&cache->lock);
3089 spin_unlock(&cache->space_info->lock);
3092 clear_extent_dirty(&fs_info->pinned_extents,
3093 bytenr, bytenr + len - 1,
3096 cache_block_group(cache);
3099 btrfs_add_free_space(cache, bytenr, len);
3101 btrfs_put_block_group(cache);
3108 static int update_reserved_extents(struct btrfs_root *root,
3109 u64 bytenr, u64 num, int reserve)
3112 struct btrfs_block_group_cache *cache;
3113 struct btrfs_fs_info *fs_info = root->fs_info;
3116 cache = btrfs_lookup_block_group(fs_info, bytenr);
3118 len = min(num, cache->key.offset -
3119 (bytenr - cache->key.objectid));
3121 spin_lock(&cache->space_info->lock);
3122 spin_lock(&cache->lock);
3124 cache->reserved += len;
3125 cache->space_info->bytes_reserved += len;
3127 cache->reserved -= len;
3128 cache->space_info->bytes_reserved -= len;
3130 spin_unlock(&cache->lock);
3131 spin_unlock(&cache->space_info->lock);
3132 btrfs_put_block_group(cache);
3139 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
3144 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
3148 ret = find_first_extent_bit(pinned_extents, last,
3149 &start, &end, EXTENT_DIRTY);
3153 set_extent_dirty(copy, start, end, GFP_NOFS);
3159 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3160 struct btrfs_root *root,
3161 struct extent_io_tree *unpin)
3168 ret = find_first_extent_bit(unpin, 0, &start, &end,
3173 ret = btrfs_discard_extent(root, start, end + 1 - start);
3175 /* unlocks the pinned mutex */
3176 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
3177 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3185 static int pin_down_bytes(struct btrfs_trans_handle *trans,
3186 struct btrfs_root *root,
3187 struct btrfs_path *path,
3188 u64 bytenr, u64 num_bytes, int is_data,
3189 struct extent_buffer **must_clean)
3192 struct extent_buffer *buf;
3197 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3201 /* we can reuse a block if it hasn't been written
3202 * and it is from this transaction. We can't
3203 * reuse anything from the tree log root because
3204 * it has tiny sub-transactions.
3206 if (btrfs_buffer_uptodate(buf, 0) &&
3207 btrfs_try_tree_lock(buf)) {
3208 u64 header_owner = btrfs_header_owner(buf);
3209 u64 header_transid = btrfs_header_generation(buf);
3210 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3211 header_transid == trans->transid &&
3212 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3216 btrfs_tree_unlock(buf);
3218 free_extent_buffer(buf);
3220 btrfs_set_path_blocking(path);
3221 /* unlocks the pinned mutex */
3222 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
3229 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3230 struct btrfs_root *root,
3231 u64 bytenr, u64 num_bytes, u64 parent,
3232 u64 root_objectid, u64 owner_objectid,
3233 u64 owner_offset, int refs_to_drop,
3234 struct btrfs_delayed_extent_op *extent_op)
3236 struct btrfs_key key;
3237 struct btrfs_path *path;
3238 struct btrfs_fs_info *info = root->fs_info;
3239 struct btrfs_root *extent_root = info->extent_root;
3240 struct extent_buffer *leaf;
3241 struct btrfs_extent_item *ei;
3242 struct btrfs_extent_inline_ref *iref;
3245 int extent_slot = 0;
3246 int found_extent = 0;
3251 path = btrfs_alloc_path();
3256 path->leave_spinning = 1;
3258 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3259 BUG_ON(!is_data && refs_to_drop != 1);
3261 ret = lookup_extent_backref(trans, extent_root, path, &iref,
3262 bytenr, num_bytes, parent,
3263 root_objectid, owner_objectid,
3266 extent_slot = path->slots[0];
3267 while (extent_slot >= 0) {
3268 btrfs_item_key_to_cpu(path->nodes[0], &key,
3270 if (key.objectid != bytenr)
3272 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3273 key.offset == num_bytes) {
3277 if (path->slots[0] - extent_slot > 5)
3281 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3282 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3283 if (found_extent && item_size < sizeof(*ei))
3286 if (!found_extent) {
3288 ret = remove_extent_backref(trans, extent_root, path,
3292 btrfs_release_path(extent_root, path);
3293 path->leave_spinning = 1;
3295 key.objectid = bytenr;
3296 key.type = BTRFS_EXTENT_ITEM_KEY;
3297 key.offset = num_bytes;
3299 ret = btrfs_search_slot(trans, extent_root,
3302 printk(KERN_ERR "umm, got %d back from search"
3303 ", was looking for %llu\n", ret,
3304 (unsigned long long)bytenr);
3305 btrfs_print_leaf(extent_root, path->nodes[0]);
3308 extent_slot = path->slots[0];
3311 btrfs_print_leaf(extent_root, path->nodes[0]);
3313 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
3314 "parent %llu root %llu owner %llu offset %llu\n",
3315 (unsigned long long)bytenr,
3316 (unsigned long long)parent,
3317 (unsigned long long)root_objectid,
3318 (unsigned long long)owner_objectid,
3319 (unsigned long long)owner_offset);
3322 leaf = path->nodes[0];
3323 item_size = btrfs_item_size_nr(leaf, extent_slot);
3324 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3325 if (item_size < sizeof(*ei)) {
3326 BUG_ON(found_extent || extent_slot != path->slots[0]);
3327 ret = convert_extent_item_v0(trans, extent_root, path,
3331 btrfs_release_path(extent_root, path);
3332 path->leave_spinning = 1;
3334 key.objectid = bytenr;
3335 key.type = BTRFS_EXTENT_ITEM_KEY;
3336 key.offset = num_bytes;
3338 ret = btrfs_search_slot(trans, extent_root, &key, path,
3341 printk(KERN_ERR "umm, got %d back from search"
3342 ", was looking for %llu\n", ret,
3343 (unsigned long long)bytenr);
3344 btrfs_print_leaf(extent_root, path->nodes[0]);
3347 extent_slot = path->slots[0];
3348 leaf = path->nodes[0];
3349 item_size = btrfs_item_size_nr(leaf, extent_slot);
3352 BUG_ON(item_size < sizeof(*ei));
3353 ei = btrfs_item_ptr(leaf, extent_slot,
3354 struct btrfs_extent_item);
3355 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3356 struct btrfs_tree_block_info *bi;
3357 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3358 bi = (struct btrfs_tree_block_info *)(ei + 1);
3359 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3362 refs = btrfs_extent_refs(leaf, ei);
3363 BUG_ON(refs < refs_to_drop);
3364 refs -= refs_to_drop;
3368 __run_delayed_extent_op(extent_op, leaf, ei);
3370 * In the case of inline back ref, reference count will
3371 * be updated by remove_extent_backref
3374 BUG_ON(!found_extent);
3376 btrfs_set_extent_refs(leaf, ei, refs);
3377 btrfs_mark_buffer_dirty(leaf);
3380 ret = remove_extent_backref(trans, extent_root, path,
3387 struct extent_buffer *must_clean = NULL;
3390 BUG_ON(is_data && refs_to_drop !=
3391 extent_data_ref_count(root, path, iref));
3393 BUG_ON(path->slots[0] != extent_slot);
3395 BUG_ON(path->slots[0] != extent_slot + 1);
3396 path->slots[0] = extent_slot;
3401 ret = pin_down_bytes(trans, root, path, bytenr,
3402 num_bytes, is_data, &must_clean);
3407 * it is going to be very rare for someone to be waiting
3408 * on the block we're freeing. del_items might need to
3409 * schedule, so rather than get fancy, just force it
3413 btrfs_set_lock_blocking(must_clean);
3415 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3418 btrfs_release_path(extent_root, path);
3421 clean_tree_block(NULL, root, must_clean);
3422 btrfs_tree_unlock(must_clean);
3423 free_extent_buffer(must_clean);
3427 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3430 invalidate_mapping_pages(info->btree_inode->i_mapping,
3431 bytenr >> PAGE_CACHE_SHIFT,
3432 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
3435 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
3439 btrfs_free_path(path);
3444 * when we free an extent, it is possible (and likely) that we free the last
3445 * delayed ref for that extent as well. This searches the delayed ref tree for
3446 * a given extent, and if there are no other delayed refs to be processed, it
3447 * removes it from the tree.
3449 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3450 struct btrfs_root *root, u64 bytenr)
3452 struct btrfs_delayed_ref_head *head;
3453 struct btrfs_delayed_ref_root *delayed_refs;
3454 struct btrfs_delayed_ref_node *ref;
3455 struct rb_node *node;
3458 delayed_refs = &trans->transaction->delayed_refs;
3459 spin_lock(&delayed_refs->lock);
3460 head = btrfs_find_delayed_ref_head(trans, bytenr);
3464 node = rb_prev(&head->node.rb_node);
3468 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3470 /* there are still entries for this ref, we can't drop it */
3471 if (ref->bytenr == bytenr)
3474 if (head->extent_op) {
3475 if (!head->must_insert_reserved)
3477 kfree(head->extent_op);
3478 head->extent_op = NULL;
3482 * waiting for the lock here would deadlock. If someone else has it
3483 * locked they are already in the process of dropping it anyway
3485 if (!mutex_trylock(&head->mutex))
3489 * at this point we have a head with no other entries. Go
3490 * ahead and process it.
3492 head->node.in_tree = 0;
3493 rb_erase(&head->node.rb_node, &delayed_refs->root);
3495 delayed_refs->num_entries--;
3498 * we don't take a ref on the node because we're removing it from the
3499 * tree, so we just steal the ref the tree was holding.
3501 delayed_refs->num_heads--;
3502 if (list_empty(&head->cluster))
3503 delayed_refs->num_heads_ready--;
3505 list_del_init(&head->cluster);
3506 spin_unlock(&delayed_refs->lock);
3508 ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
3509 &head->node, head->extent_op,
3510 head->must_insert_reserved);
3512 btrfs_put_delayed_ref(&head->node);
3515 spin_unlock(&delayed_refs->lock);
3519 int btrfs_free_extent(struct btrfs_trans_handle *trans,
3520 struct btrfs_root *root,
3521 u64 bytenr, u64 num_bytes, u64 parent,
3522 u64 root_objectid, u64 owner, u64 offset)
3527 * tree log blocks never actually go into the extent allocation
3528 * tree, just update pinning info and exit early.
3530 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
3531 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
3532 /* unlocks the pinned mutex */
3533 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
3534 update_reserved_extents(root, bytenr, num_bytes, 0);
3536 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
3537 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
3538 parent, root_objectid, (int)owner,
3539 BTRFS_DROP_DELAYED_REF, NULL);
3541 ret = check_ref_cleanup(trans, root, bytenr);
3544 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
3545 parent, root_objectid, owner,
3546 offset, BTRFS_DROP_DELAYED_REF, NULL);
3552 static u64 stripe_align(struct btrfs_root *root, u64 val)
3554 u64 mask = ((u64)root->stripesize - 1);
3555 u64 ret = (val + mask) & ~mask;
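/*
 * worked example for stripe_align() above (illustrative): with a
 * stripesize of 4096, mask is 0xfff, so val = 5000 aligns up to
 * (5000 + 4095) & ~4095 = 8192
 */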
3560 * when we wait for progress in the block group caching, it's because
3561 * our allocation attempt failed at least once. So, we must sleep
3562 * and let some progress happen before we try again.
3564 * This function will sleep at least once waiting for new free space to
3565 * show up, and then it will check the block group free space numbers
3566 * for our min num_bytes. Another option is to have it go ahead
3567 * and look in the rbtree for a free extent of a given size, but this
3571 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
3576 prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE);
3578 if (block_group_cache_done(cache)) {
3579 finish_wait(&cache->caching_q, &wait);
3583 finish_wait(&cache->caching_q, &wait);
3585 wait_event(cache->caching_q, block_group_cache_done(cache) ||
3586 (cache->free_space >= num_bytes));
3590 enum btrfs_loop_type {
3591 LOOP_CACHED_ONLY = 0,
3592 LOOP_CACHING_NOWAIT = 1,
3593 LOOP_CACHING_WAIT = 2,
3594 LOOP_ALLOC_CHUNK = 3,
3595 LOOP_NO_EMPTY_SIZE = 4,
3599 * walks the btree of allocated extents and finds a hole of a given size.
3600 * The key ins is changed to record the hole:
3601 * ins->objectid == block start
3602 * ins->flags = BTRFS_EXTENT_ITEM_KEY
3603 * ins->offset == number of blocks
3604 * Any available blocks before search_start are skipped.
3606 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
3607 struct btrfs_root *orig_root,
3608 u64 num_bytes, u64 empty_size,
3609 u64 search_start, u64 search_end,
3610 u64 hint_byte, struct btrfs_key *ins,
3611 u64 exclude_start, u64 exclude_nr,
3615 struct btrfs_root *root = orig_root->fs_info->extent_root;
3616 struct btrfs_free_cluster *last_ptr = NULL;
3617 struct btrfs_block_group_cache *block_group = NULL;
3618 int empty_cluster = 2 * 1024 * 1024;
3619 int allowed_chunk_alloc = 0;
3620 struct btrfs_space_info *space_info;
3621 int last_ptr_loop = 0;
3623 bool found_uncached_bg = false;
3625 WARN_ON(num_bytes < root->sectorsize);
3626 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
3630 space_info = __find_space_info(root->fs_info, data);
3632 if (orig_root->ref_cows || empty_size)
3633 allowed_chunk_alloc = 1;
3635 if (data & BTRFS_BLOCK_GROUP_METADATA) {
3636 last_ptr = &root->fs_info->meta_alloc_cluster;
3637 if (!btrfs_test_opt(root, SSD))
3638 empty_cluster = 64 * 1024;
3641 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
3642 last_ptr = &root->fs_info->data_alloc_cluster;
3646 spin_lock(&last_ptr->lock);
3647 if (last_ptr->block_group)
3648 hint_byte = last_ptr->window_start;
3649 spin_unlock(&last_ptr->lock);
3652 search_start = max(search_start, first_logical_byte(root, 0));
3653 search_start = max(search_start, hint_byte);
3658 if (search_start == hint_byte) {
3659 block_group = btrfs_lookup_block_group(root->fs_info,
3662 * we don't want to use the block group if it doesn't match our
3663 * allocation bits, or if it's not cached.
3665 if (block_group && block_group_bits(block_group, data) &&
3666 block_group_cache_done(block_group)) {
3667 down_read(&space_info->groups_sem);
3668 if (list_empty(&block_group->list) ||
3671 * someone is removing this block group,
3672 * we can't jump into the have_block_group
3673 * target because our list pointers are not
3676 btrfs_put_block_group(block_group);
3677 up_read(&space_info->groups_sem);
3679 goto have_block_group;
3680 } else if (block_group) {
3681 btrfs_put_block_group(block_group);
3686 down_read(&space_info->groups_sem);
3687 list_for_each_entry(block_group, &space_info->block_groups, list) {
3691 atomic_inc(&block_group->count);
3692 search_start = block_group->key.objectid;
3695 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
3697 * we want to start caching kthreads, but not too many
3698 * right off the bat so we don't overwhelm the system,
3699 * so only start them if there are less than 2 and we're
3700 * in the initial allocation phase.
3702 if (loop > LOOP_CACHING_NOWAIT ||
3703 atomic_read(&space_info->caching_threads) < 2) {
3704 ret = cache_block_group(block_group);
3709 cached = block_group_cache_done(block_group);
3710 if (unlikely(!cached)) {
3711 found_uncached_bg = true;
3713 /* if we only want cached bgs, loop */
3714 if (loop == LOOP_CACHED_ONLY)
3718 if (unlikely(block_group->ro))
3723 * the refill lock keeps out other
3724 * people trying to start a new cluster
3726 spin_lock(&last_ptr->refill_lock);
3727 if (last_ptr->block_group &&
3728 (last_ptr->block_group->ro ||
3729 !block_group_bits(last_ptr->block_group, data))) {
3731 goto refill_cluster;
3734 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
3735 num_bytes, search_start);
3737 /* we have a block, we're done */
3738 spin_unlock(&last_ptr->refill_lock);
3742 spin_lock(&last_ptr->lock);
3744 * whoops, this cluster doesn't actually point to
3745 * this block group. Get a ref on the block
3746 * group it does point to and try again
3748 if (!last_ptr_loop && last_ptr->block_group &&
3749 last_ptr->block_group != block_group) {
3751 btrfs_put_block_group(block_group);
3752 block_group = last_ptr->block_group;
3753 atomic_inc(&block_group->count);
3754 spin_unlock(&last_ptr->lock);
3755 spin_unlock(&last_ptr->refill_lock);
3758 search_start = block_group->key.objectid;
3760 * we know this block group is properly
3761 * in the list because
3762 * btrfs_remove_block_group drops the
3763 * cluster before it removes the block
3764 * group from the list
3766 goto have_block_group;
3768 spin_unlock(&last_ptr->lock);
3771 * this cluster didn't work out, free it and
3774 btrfs_return_cluster_to_free_space(NULL, last_ptr);
3778 /* allocate a cluster in this block group */
3779 ret = btrfs_find_space_cluster(trans, root,
3780 block_group, last_ptr,
3782 empty_cluster + empty_size);
3785 * now pull our allocation out of this
3788 offset = btrfs_alloc_from_cluster(block_group,
3789 last_ptr, num_bytes,
3792 /* we found one, proceed */
3793 spin_unlock(&last_ptr->refill_lock);
3796 } else if (!cached && loop > LOOP_CACHING_NOWAIT) {
3797 spin_unlock(&last_ptr->refill_lock);
3799 wait_block_group_cache_progress(block_group,
3800 num_bytes + empty_cluster + empty_size);
3801 goto have_block_group;
3805 * at this point we either didn't find a cluster
3806 * or we weren't able to allocate a block from our
3807 * cluster. Free the cluster we've been trying
3808 * to use, and go to the next block group
3810 if (loop < LOOP_NO_EMPTY_SIZE) {
3811 btrfs_return_cluster_to_free_space(NULL,
3813 spin_unlock(&last_ptr->refill_lock);
3816 spin_unlock(&last_ptr->refill_lock);
3819 offset = btrfs_find_space_for_alloc(block_group, search_start,
3820 num_bytes, empty_size);
3821 if (!offset && (cached || (!cached &&
3822 loop == LOOP_CACHING_NOWAIT))) {
3824 } else if (!offset && (!cached &&
3825 loop > LOOP_CACHING_NOWAIT)) {
3826 wait_block_group_cache_progress(block_group,
3827 num_bytes + empty_size);
3828 goto have_block_group;
3831 search_start = stripe_align(root, offset);
3832 /* move on to the next group */
3833 if (search_start + num_bytes >= search_end) {
3834 btrfs_add_free_space(block_group, offset, num_bytes);
3838 /* move on to the next group */
3839 if (search_start + num_bytes >
3840 block_group->key.objectid + block_group->key.offset) {
3841 btrfs_add_free_space(block_group, offset, num_bytes);
3845 if (exclude_nr > 0 &&
3846 (search_start + num_bytes > exclude_start &&
3847 search_start < exclude_start + exclude_nr)) {
3848 search_start = exclude_start + exclude_nr;
3850 btrfs_add_free_space(block_group, offset, num_bytes);
3852 * if search_start is still in this block group
3853 * then we just re-search this block group
3855 if (search_start >= block_group->key.objectid &&
3856 search_start < (block_group->key.objectid +
3857 block_group->key.offset))
3858 goto have_block_group;
3862 ins->objectid = search_start;
3863 ins->offset = num_bytes;
3865 if (offset < search_start)
3866 btrfs_add_free_space(block_group, offset,
3867 search_start - offset);
3868 BUG_ON(offset > search_start);
3870 /* we are all good, lets return */
3873 btrfs_put_block_group(block_group);
3875 up_read(&space_info->groups_sem);
3877 /* LOOP_CACHED_ONLY, only search fully cached block groups
3878 * LOOP_CACHING_NOWAIT, search partially cached block groups, but
3879 * don't wait for them to finish caching
3880 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
3881 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
3882 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
3885 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
3886 (found_uncached_bg || empty_size || empty_cluster ||
3887 allowed_chunk_alloc)) {
3888 if (found_uncached_bg) {
3889 found_uncached_bg = false;
3890 if (loop < LOOP_CACHING_WAIT) {
3896 if (loop == LOOP_ALLOC_CHUNK) {
3901 if (allowed_chunk_alloc) {
3902 ret = do_chunk_alloc(trans, root, num_bytes +
3903 2 * 1024 * 1024, data, 1);
3904 allowed_chunk_alloc = 0;
3906 space_info->force_alloc = 1;
3909 if (loop < LOOP_NO_EMPTY_SIZE) {
3914 } else if (!ins->objectid) {
3918 /* we found what we needed */
3919 if (ins->objectid) {
3920 if (!(data & BTRFS_BLOCK_GROUP_DATA))
3921 trans->block_group = block_group->key.objectid;
3923 btrfs_put_block_group(block_group);
3930 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3932 struct btrfs_block_group_cache *cache;
3934 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
3935 (unsigned long long)(info->total_bytes - info->bytes_used -
3936 info->bytes_pinned - info->bytes_reserved),
3937 (info->full) ? "" : "not ");
3938 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
3939 " may_use=%llu, used=%llu\n",
3940 (unsigned long long)info->total_bytes,
3941 (unsigned long long)info->bytes_pinned,
3942 (unsigned long long)info->bytes_delalloc,
3943 (unsigned long long)info->bytes_may_use,
3944 (unsigned long long)info->bytes_used);
3946 down_read(&info->groups_sem);
3947 list_for_each_entry(cache, &info->block_groups, list) {
3948 spin_lock(&cache->lock);
3949 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
3950 "%llu pinned %llu reserved\n",
3951 (unsigned long long)cache->key.objectid,
3952 (unsigned long long)cache->key.offset,
3953 (unsigned long long)btrfs_block_group_used(&cache->item),
3954 (unsigned long long)cache->pinned,
3955 (unsigned long long)cache->reserved);
3956 btrfs_dump_free_space(cache, bytes);
3957 spin_unlock(&cache->lock);
3959 up_read(&info->groups_sem);
3962 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3963 struct btrfs_root *root,
3964 u64 num_bytes, u64 min_alloc_size,
3965 u64 empty_size, u64 hint_byte,
3966 u64 search_end, struct btrfs_key *ins,
3970 u64 search_start = 0;
3971 struct btrfs_fs_info *info = root->fs_info;
3973 data = btrfs_get_alloc_profile(root, data);
3976 * the only place that sets empty_size is btrfs_realloc_node, which
3977 * is not called recursively on allocations
3979 if (empty_size || root->ref_cows) {
3980 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
3981 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3983 BTRFS_BLOCK_GROUP_METADATA |
3984 (info->metadata_alloc_profile &
3985 info->avail_metadata_alloc_bits), 0);
3987 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3988 num_bytes + 2 * 1024 * 1024, data, 0);
3991 WARN_ON(num_bytes < root->sectorsize);
3992 ret = find_free_extent(trans, root, num_bytes, empty_size,
3993 search_start, search_end, hint_byte, ins,
3994 trans->alloc_exclude_start,
3995 trans->alloc_exclude_nr, data);
3997 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
3998 num_bytes = num_bytes >> 1;
3999 num_bytes = num_bytes & ~(root->sectorsize - 1);
4000 num_bytes = max(num_bytes, min_alloc_size);
4001 do_chunk_alloc(trans, root->fs_info->extent_root,
4002 num_bytes, data, 1);
4005 if (ret == -ENOSPC) {
4006 struct btrfs_space_info *sinfo;
4008 sinfo = __find_space_info(root->fs_info, data);
4009 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4010 "wanted %llu\n", (unsigned long long)data,
4011 (unsigned long long)num_bytes);
4012 dump_space_info(sinfo, num_bytes);
4018 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4020 struct btrfs_block_group_cache *cache;
4023 cache = btrfs_lookup_block_group(root->fs_info, start);
4025 printk(KERN_ERR "Unable to find block group for %llu\n",
4026 (unsigned long long)start);
4030 ret = btrfs_discard_extent(root, start, len);
4032 btrfs_add_free_space(cache, start, len);
4033 btrfs_put_block_group(cache);
4034 update_reserved_extents(root, start, len, 0);
4039 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4040 struct btrfs_root *root,
4041 u64 num_bytes, u64 min_alloc_size,
4042 u64 empty_size, u64 hint_byte,
4043 u64 search_end, struct btrfs_key *ins,
4047 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
4048 empty_size, hint_byte, search_end, ins,
4051 update_reserved_extents(root, ins->objectid, ins->offset, 1);
4056 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4057 struct btrfs_root *root,
4058 u64 parent, u64 root_objectid,
4059 u64 flags, u64 owner, u64 offset,
4060 struct btrfs_key *ins, int ref_mod)
4063 struct btrfs_fs_info *fs_info = root->fs_info;
4064 struct btrfs_extent_item *extent_item;
4065 struct btrfs_extent_inline_ref *iref;
4066 struct btrfs_path *path;
4067 struct extent_buffer *leaf;
4072 type = BTRFS_SHARED_DATA_REF_KEY;
4074 type = BTRFS_EXTENT_DATA_REF_KEY;
4076 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
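/*
 * resulting item layout, per the code below: a btrfs_extent_item
 * followed by one inline backref; for a shared ref a
 * btrfs_shared_data_ref count follows the iref, for a normal ref the
 * btrfs_extent_data_ref body overlays the iref's offset field
 */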
4078 path = btrfs_alloc_path();
4081 path->leave_spinning = 1;
4082 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4086 leaf = path->nodes[0];
4087 extent_item = btrfs_item_ptr(leaf, path->slots[0],
4088 struct btrfs_extent_item);
4089 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4090 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4091 btrfs_set_extent_flags(leaf, extent_item,
4092 flags | BTRFS_EXTENT_FLAG_DATA);
4094 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4095 btrfs_set_extent_inline_ref_type(leaf, iref, type);
4097 struct btrfs_shared_data_ref *ref;
4098 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4099 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4100 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4102 struct btrfs_extent_data_ref *ref;
4103 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4104 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4105 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4106 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4107 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4110 btrfs_mark_buffer_dirty(path->nodes[0]);
4111 btrfs_free_path(path);
4113 ret = update_block_group(trans, root, ins->objectid, ins->offset,
4116 printk(KERN_ERR "btrfs update block group failed for %llu "
4117 "%llu\n", (unsigned long long)ins->objectid,
4118 (unsigned long long)ins->offset);
4124 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4125 struct btrfs_root *root,
4126 u64 parent, u64 root_objectid,
4127 u64 flags, struct btrfs_disk_key *key,
4128 int level, struct btrfs_key *ins)
4131 struct btrfs_fs_info *fs_info = root->fs_info;
4132 struct btrfs_extent_item *extent_item;
4133 struct btrfs_tree_block_info *block_info;
4134 struct btrfs_extent_inline_ref *iref;
4135 struct btrfs_path *path;
4136 struct extent_buffer *leaf;
4137 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
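/*
 * item layout, per the code below: a btrfs_extent_item, then a
 * btrfs_tree_block_info holding the key and level, then a single
 * inline backref (shared block ref or tree block ref)
 */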
4139 path = btrfs_alloc_path();
4142 path->leave_spinning = 1;
4143 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4147 leaf = path->nodes[0];
4148 extent_item = btrfs_item_ptr(leaf, path->slots[0],
4149 struct btrfs_extent_item);
4150 btrfs_set_extent_refs(leaf, extent_item, 1);
4151 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4152 btrfs_set_extent_flags(leaf, extent_item,
4153 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4154 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4156 btrfs_set_tree_block_key(leaf, block_info, key);
4157 btrfs_set_tree_block_level(leaf, block_info, level);
4159 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4161 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4162 btrfs_set_extent_inline_ref_type(leaf, iref,
4163 BTRFS_SHARED_BLOCK_REF_KEY);
4164 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4166 btrfs_set_extent_inline_ref_type(leaf, iref,
4167 BTRFS_TREE_BLOCK_REF_KEY);
4168 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
4171 btrfs_mark_buffer_dirty(leaf);
4172 btrfs_free_path(path);
4174 ret = update_block_group(trans, root, ins->objectid, ins->offset,
4177 printk(KERN_ERR "btrfs update block group failed for %llu "
4178 "%llu\n", (unsigned long long)ins->objectid,
4179 (unsigned long long)ins->offset);
4185 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4186 struct btrfs_root *root,
4187 u64 root_objectid, u64 owner,
4188 u64 offset, struct btrfs_key *ins)
4192 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
4194 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
4195 0, root_objectid, owner, offset,
4196 BTRFS_ADD_DELAYED_EXTENT, NULL);
4201 * this is used by the tree logging recovery code. It records that
4202 * an extent has been allocated and makes sure to clear the free
4203 * space cache bits as well
4205 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4206 struct btrfs_root *root,
4207 u64 root_objectid, u64 owner, u64 offset,
4208 struct btrfs_key *ins)
4211 struct btrfs_block_group_cache *block_group;
4213 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
4214 cache_block_group(block_group);
4215 wait_event(block_group->caching_q,
4216 block_group_cache_done(block_group));
4218 ret = btrfs_remove_free_space(block_group, ins->objectid,
4221 btrfs_put_block_group(block_group);
4222 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4223 0, owner, offset, ins, 1);
4228 * finds a free extent and does all the dirty work required for allocation
4229 * returns the key for the extent through ins, and a tree buffer for
4230 * the first block of the extent through buf.
4232 * returns 0 if everything worked, non-zero otherwise.
4234 static int alloc_tree_block(struct btrfs_trans_handle *trans,
4235 struct btrfs_root *root,
4236 u64 num_bytes, u64 parent, u64 root_objectid,
4237 struct btrfs_disk_key *key, int level,
4238 u64 empty_size, u64 hint_byte, u64 search_end,
4239 struct btrfs_key *ins)
4244 ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4245 empty_size, hint_byte, search_end,
4250 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4252 parent = ins->objectid;
4253 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4257 update_reserved_extents(root, ins->objectid, ins->offset, 1);
4258 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4259 struct btrfs_delayed_extent_op *extent_op;
4260 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4263 memcpy(&extent_op->key, key, sizeof(extent_op->key));
4265 memset(&extent_op->key, 0, sizeof(extent_op->key));
4266 extent_op->flags_to_set = flags;
4267 extent_op->update_key = 1;
4268 extent_op->update_flags = 1;
4269 extent_op->is_data = 0;
4271 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4272 ins->offset, parent, root_objectid,
4273 level, BTRFS_ADD_DELAYED_EXTENT,
4280 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4281 struct btrfs_root *root,
4282 u64 bytenr, u32 blocksize,
4285 struct extent_buffer *buf;
4287 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
4289 return ERR_PTR(-ENOMEM);
4290 btrfs_set_header_generation(buf, trans->transid);
4291 btrfs_set_buffer_lockdep_class(buf, level);
4292 btrfs_tree_lock(buf);
4293 clean_tree_block(trans, root, buf);
4295 btrfs_set_lock_blocking(buf);
4296 btrfs_set_buffer_uptodate(buf);
4298 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4299 set_extent_dirty(&root->dirty_log_pages, buf->start,
4300 buf->start + buf->len - 1, GFP_NOFS);
4302 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4303 buf->start + buf->len - 1, GFP_NOFS);
4305 trans->blocks_used++;
4306 /* this returns a buffer locked for blocking */
4311 * helper function to allocate a block for a given tree
4312 * returns the tree buffer or NULL.
4314 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
4315 struct btrfs_root *root, u32 blocksize,
4316 u64 parent, u64 root_objectid,
4317 struct btrfs_disk_key *key, int level,
4318 u64 hint, u64 empty_size)
4320 struct btrfs_key ins;
4322 struct extent_buffer *buf;
4324 ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
4325 key, level, empty_size, hint, (u64)-1, &ins);
4328 return ERR_PTR(ret);
4331 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4337 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
4338 struct btrfs_root *root, struct extent_buffer *leaf)
4342 struct btrfs_key key;
4343 struct btrfs_file_extent_item *fi;
4348 BUG_ON(!btrfs_is_leaf(leaf));
4349 nritems = btrfs_header_nritems(leaf);
4351 for (i = 0; i < nritems; i++) {
4353 btrfs_item_key_to_cpu(leaf, &key, i);
4355 /* only extents have references, skip everything else */
4356 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4359 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4361 /* inline extents live in the btree, they don't have refs */
4362 if (btrfs_file_extent_type(leaf, fi) ==
4363 BTRFS_FILE_EXTENT_INLINE)
4366 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4368 /* holes don't have refs */
4369 if (disk_bytenr == 0)
4372 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4373 ret = btrfs_free_extent(trans, root, disk_bytenr, num_bytes,
4374 leaf->start, 0, key.objectid, 0);
4380 static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
4381 struct btrfs_root *root,
4382 struct btrfs_leaf_ref *ref)
4386 struct btrfs_extent_info *info;
4387 struct refsort *sorted;
4389 if (ref->nritems == 0)
4392 sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
4393 for (i = 0; i < ref->nritems; i++) {
4394 sorted[i].bytenr = ref->extents[i].bytenr;
4397 sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
4400 * the array was just sorted by bytenr above, so walk it in that
4401 * order and use each saved slot to index back into ref->extents
4403 for (i = 0; i < ref->nritems; i++) {
4404 info = ref->extents + sorted[i].slot;
4405 ret = btrfs_free_extent(trans, root, info->bytenr,
4406 info->num_bytes, ref->bytenr,
4407 ref->owner, ref->generation,
4410 atomic_inc(&root->fs_info->throttle_gen);
4411 wake_up(&root->fs_info->transaction_throttle);
4423 static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans,
4424 struct btrfs_root *root, u64 start,
4429 ret = btrfs_lookup_extent_refs(trans, root, start, len, refs);
4432 #if 0 /* some debugging code in case we see problems here */
4433 /* if the refs count is one, it won't get increased again. But
4434 * if the ref count is > 1, someone may be decreasing it at
4435 * the same time we are.
4438 struct extent_buffer *eb = NULL;
4439 eb = btrfs_find_create_tree_block(root, start, len);
4441 btrfs_tree_lock(eb);
4443 mutex_lock(&root->fs_info->alloc_mutex);
4444 ret = lookup_extent_ref(NULL, root, start, len, refs);
4446 mutex_unlock(&root->fs_info->alloc_mutex);
4449 btrfs_tree_unlock(eb);
4450 free_extent_buffer(eb);
4453 printk(KERN_ERR "btrfs block %llu went down to one "
4454 "during drop_snap\n", (unsigned long long)start);
4466 * this is used while deleting old snapshots, and it drops the refs
4467 * on a whole subtree starting from a level 1 node.
4469 * The idea is to sort all the leaf pointers, and then drop the
4470 * ref on all the leaves in order. Most of the time the leaves
4471 * will have ref cache entries, so no leaf IOs will be required to
4472 * find the extents they have references on.
4474 * For each leaf, any references it has are also dropped in order
4476 * This ends up dropping the references in something close to optimal
4477 * order for reading and modifying the extent allocation tree.
4479 static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
4480 struct btrfs_root *root,
4481 struct btrfs_path *path)
4486 struct extent_buffer *eb = path->nodes[1];
4487 struct extent_buffer *leaf;
4488 struct btrfs_leaf_ref *ref;
4489 struct refsort *sorted = NULL;
4490 int nritems = btrfs_header_nritems(eb);
4494 int slot = path->slots[1];
4495 u32 blocksize = btrfs_level_size(root, 0);
4501 root_owner = btrfs_header_owner(eb);
4502 root_gen = btrfs_header_generation(eb);
4503 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
4506 * step one, sort all the leaf pointers so we don't scribble
4507 * randomly into the extent allocation tree
4509 for (i = slot; i < nritems; i++) {
4510 sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
4511 sorted[refi].slot = i;
4516 * nritems won't be zero, but if we're picking up drop_snapshot
4517 * after a crash, slot might be > 0, so double check things
4523 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
4526 * the first loop frees everything the leaves point to
4528 for (i = 0; i < refi; i++) {
4531 bytenr = sorted[i].bytenr;
4534 * check the reference count on this leaf. If it is > 1
4535 * we just decrement it below and don't update any
4536 * of the refs the leaf points to.
4538 ret = drop_snap_lookup_refcount(trans, root, bytenr,
4544 ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
4547 * the leaf only had one reference, which means the
4548 * only thing pointing to this leaf is the snapshot
4549 * we're deleting. It isn't possible for the reference
4550 * count to increase again later
4552 * The reference cache is checked for the leaf,
4553 * and if found we'll be able to drop any refs held by
4554 * the leaf without needing to read it in.
4556 ref = btrfs_lookup_leaf_ref(root, bytenr);
4557 if (ref && ref->generation != ptr_gen) {
4558 btrfs_free_leaf_ref(root, ref);
4562 ret = cache_drop_leaf_ref(trans, root, ref);
4564 btrfs_remove_leaf_ref(root, ref);
4565 btrfs_free_leaf_ref(root, ref);
4568 * the leaf wasn't in the reference cache, so
4569 * we have to read it.
4571 leaf = read_tree_block(root, bytenr, blocksize,
4573 ret = btrfs_drop_leaf_ref(trans, root, leaf);
4575 free_extent_buffer(leaf);
4577 atomic_inc(&root->fs_info->throttle_gen);
4578 wake_up(&root->fs_info->transaction_throttle);
4583 * run through the loop again to free the refs on the leaves.
4584 * This is faster than doing it in the loop above because
4585 * the leaves are likely to be clustered together. We end up
4586 * working in nice chunks on the extent allocation tree.
4588 for (i = 0; i < refi; i++) {
4589 bytenr = sorted[i].bytenr;
4590 ret = btrfs_free_extent(trans, root, bytenr,
4591 blocksize, eb->start,
4592 root_owner, root_gen, 0, 1);
4595 atomic_inc(&root->fs_info->throttle_gen);
4596 wake_up(&root->fs_info->transaction_throttle);
4603 * update the path to show we've processed the entire level 1
4604 * node. This will get saved into the root's drop_snapshot_progress
4605 * field so these drops are not repeated again if this transaction
4608 path->slots[1] = nritems;
4613 * helper function for drop_snapshot, this walks down the tree dropping ref
4614 * counts as it goes.
4616 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4617 struct btrfs_root *root,
4618 struct btrfs_path *path, int *level)
4624 struct extent_buffer *next;
4625 struct extent_buffer *cur;
4626 struct extent_buffer *parent;
4631 WARN_ON(*level < 0);
4632 WARN_ON(*level >= BTRFS_MAX_LEVEL);
4633 ret = drop_snap_lookup_refcount(trans, root, path->nodes[*level]->start,
4634 path->nodes[*level]->len, &refs);
4640 * walk down to the last node level and free all the leaves
4642 while (*level >= 0) {
4643 WARN_ON(*level < 0);
4644 WARN_ON(*level >= BTRFS_MAX_LEVEL);
4645 cur = path->nodes[*level];
4647 if (btrfs_header_level(cur) != *level)
4650 if (path->slots[*level] >=
4651 btrfs_header_nritems(cur))
4654 /* the new code goes down to level 1 and does all the
4655 * leaves pointed to by that node in bulk. So, this check
4656 * for level 0 will always be false.
4658 * But, the disk format allows the drop_snapshot_progress
4659 * field in the root to leave things in a state where
4660 * a leaf will need cleaning up here. If someone crashes
4661 * with the old code and then boots with the new code,
4662 * we might find a leaf here.
4665 ret = btrfs_drop_leaf_ref(trans, root, cur);
4671 * once we get to level one, process the whole node
4672 * at once, including everything below it.
4675 ret = drop_level_one_refs(trans, root, path);
4680 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
4681 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
4682 blocksize = btrfs_level_size(root, *level - 1);
4684 ret = drop_snap_lookup_refcount(trans, root, bytenr,
4689 * if there is more than one reference, we don't need
4690 * to read that node to drop any references it has. We
4691 * just drop the ref we hold on that node and move on to the
4692 * next slot in this level.
4695 parent = path->nodes[*level];
4696 root_owner = btrfs_header_owner(parent);
4697 root_gen = btrfs_header_generation(parent);
4698 path->slots[*level]++;
4700 ret = btrfs_free_extent(trans, root, bytenr,
4701 blocksize, parent->start,
4702 root_owner, root_gen,
4706 atomic_inc(&root->fs_info->throttle_gen);
4707 wake_up(&root->fs_info->transaction_throttle);
4714 * we need to keep freeing things in the next level down.
4715 * read the block and loop around to process it
4717 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
4718 WARN_ON(*level <= 0);
4719 if (path->nodes[*level-1])
4720 free_extent_buffer(path->nodes[*level-1]);
4721 path->nodes[*level-1] = next;
4722 *level = btrfs_header_level(next);
4723 path->slots[*level] = 0;
4727 WARN_ON(*level < 0);
4728 WARN_ON(*level >= BTRFS_MAX_LEVEL);
4730 if (path->nodes[*level] == root->node) {
4731 parent = path->nodes[*level];
4732 bytenr = path->nodes[*level]->start;
4734 parent = path->nodes[*level + 1];
4735 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
4738 blocksize = btrfs_level_size(root, *level);
4739 root_owner = btrfs_header_owner(parent);
4740 root_gen = btrfs_header_generation(parent);
4743 * cleanup and free the reference on the last node
4746 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
4747 parent->start, root_owner, root_gen,
4749 free_extent_buffer(path->nodes[*level]);
4750 path->nodes[*level] = NULL;
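/*
 * bookkeeping for the snapshot-drop walk below. refs[] and flags[] cache
 * the extent reference count and extent flags for the block held at each
 * level of the path; update_progress is the key to restart backref
 * updates from when the walk switches stages.
 */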
4760 struct walk_control {
4761 u64 refs[BTRFS_MAX_LEVEL];
4762 u64 flags[BTRFS_MAX_LEVEL];
4763 struct btrfs_key update_progress;
4771 #define DROP_REFERENCE 1
4772 #define UPDATE_BACKREF 2
4775 * helper to process tree block while walking down the tree.
4777 * when wc->stage == DROP_REFERENCE, this function checks
4778 * reference count of the block. if the block is shared and
4779 * we need to update back refs for the subtree rooted at the
4780 * block, this function changes wc->stage to UPDATE_BACKREF
4782 * when wc->stage == UPDATE_BACKREF, this function updates
4783 * back refs for pointers in the block.
4785 * NOTE: return value 1 means we should stop walking down.
4787 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4788 struct btrfs_root *root,
4789 struct btrfs_path *path,
4790 struct walk_control *wc)
4792 int level = wc->level;
4793 struct extent_buffer *eb = path->nodes[level];
4794 struct btrfs_key key;
4795 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
4798 if (wc->stage == UPDATE_BACKREF &&
4799 btrfs_header_owner(eb) != root->root_key.objectid)
4803 * when the reference count of a tree block is 1, it won't increase
4804 * again. once the full backref flag is set, we never clear it.
4806 if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
4807 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
4808 BUG_ON(!path->locks[level]);
4809 ret = btrfs_lookup_extent_info(trans, root,
4814 BUG_ON(wc->refs[level] == 0);
4817 if (wc->stage == DROP_REFERENCE &&
4818 wc->update_ref && wc->refs[level] > 1) {
4819 BUG_ON(eb == root->node);
4820 BUG_ON(path->slots[level] > 0);
4822 btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
4824 btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
4825 if (btrfs_header_owner(eb) == root->root_key.objectid &&
4826 btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
4827 wc->stage = UPDATE_BACKREF;
4828 wc->shared_level = level;
4832 if (wc->stage == DROP_REFERENCE) {
4833 if (wc->refs[level] > 1)
4836 if (path->locks[level] && !wc->keep_locks) {
4837 btrfs_tree_unlock(eb);
4838 path->locks[level] = 0;
4843 /* wc->stage == UPDATE_BACKREF */
4844 if (!(wc->flags[level] & flag)) {
4845 BUG_ON(!path->locks[level]);
4846 ret = btrfs_inc_ref(trans, root, eb, 1);
4848 ret = btrfs_dec_ref(trans, root, eb, 0);
4850 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
4853 wc->flags[level] |= flag;
4857 * the block is shared by multiple trees, so it's not good to
4858 * keep the tree lock
4860 if (path->locks[level] && level > 0) {
4861 btrfs_tree_unlock(eb);
4862 path->locks[level] = 0;
4868 * helper to process tree block while walking up the tree.
4870 * when wc->stage == DROP_REFERENCE, this function drops the
4871 * reference count on the block.
4873 * when wc->stage == UPDATE_BACKREF, this function changes
4874 * wc->stage back to DROP_REFERENCE if we changed wc->stage
4875 * to UPDATE_BACKREF previously while processing the block.
4877 * NOTE: return value 1 means we should stop walking up.
4879 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
4880 struct btrfs_root *root,
4881 struct btrfs_path *path,
4882 struct walk_control *wc)
4885 int level = wc->level;
4886 struct extent_buffer *eb = path->nodes[level];
4889 if (wc->stage == UPDATE_BACKREF) {
4890 BUG_ON(wc->shared_level < level);
4891 if (level < wc->shared_level)
4894 BUG_ON(wc->refs[level] <= 1);
4895 ret = find_next_key(path, level + 1, &wc->update_progress);
4899 wc->stage = DROP_REFERENCE;
4900 wc->shared_level = -1;
4901 path->slots[level] = 0;
4904 * check the reference count again if the block isn't locked.
4905 * we should start walking down the tree again if the reference count is one.
4908 if (!path->locks[level]) {
4910 btrfs_tree_lock(eb);
4911 btrfs_set_lock_blocking(eb);
4912 path->locks[level] = 1;
4914 ret = btrfs_lookup_extent_info(trans, root,
4919 BUG_ON(wc->refs[level] == 0);
4920 if (wc->refs[level] == 1) {
4921 btrfs_tree_unlock(eb);
4922 path->locks[level] = 0;
4930 /* wc->stage == DROP_REFERENCE */
4931 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
4933 if (wc->refs[level] == 1) {
4935 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4936 ret = btrfs_dec_ref(trans, root, eb, 1);
4938 ret = btrfs_dec_ref(trans, root, eb, 0);
4941 /* make block locked assertion in clean_tree_block happy */
4942 if (!path->locks[level] &&
4943 btrfs_header_generation(eb) == trans->transid) {
4944 btrfs_tree_lock(eb);
4945 btrfs_set_lock_blocking(eb);
4946 path->locks[level] = 1;
4948 clean_tree_block(trans, root, eb);
4951 if (eb == root->node) {
4952 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4955 BUG_ON(root->root_key.objectid !=
4956 btrfs_header_owner(eb));
4958 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4959 parent = path->nodes[level + 1]->start;
4961 BUG_ON(root->root_key.objectid !=
4962 btrfs_header_owner(path->nodes[level + 1]));
4965 ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
4966 root->root_key.objectid, level, 0);
4969 wc->refs[level] = 0;
4970 wc->flags[level] = 0;
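/*
 * walk down from wc->level, running walk_down_proc on the block at each
 * level. while the walk should continue, read the child block, lock it
 * and make it the new lowest level of the path.
 */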
4974 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4975 struct btrfs_root *root,
4976 struct btrfs_path *path,
4977 struct walk_control *wc)
4979 struct extent_buffer *next;
4980 struct extent_buffer *cur;
4984 int level = wc->level;
4987 while (level >= 0) {
4988 cur = path->nodes[level];
4989 BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
4991 ret = walk_down_proc(trans, root, path, wc);
4998 bytenr = btrfs_node_blockptr(cur, path->slots[level]);
4999 blocksize = btrfs_level_size(root, level - 1);
5000 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
5002 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
5003 btrfs_tree_lock(next);
5004 btrfs_set_lock_blocking(next);
5007 BUG_ON(level != btrfs_header_level(next));
5008 path->nodes[level] = next;
5009 path->slots[level] = 0;
5010 path->locks[level] = 1;
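/*
 * walk back up the path. advance to the next slot when the current node
 * has one, otherwise run walk_up_proc on the block we just finished,
 * then drop its lock and reference and move up a level.
 */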
5016 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5017 struct btrfs_root *root,
5018 struct btrfs_path *path,
5019 struct walk_control *wc, int max_level)
5021 int level = wc->level;
5024 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5025 while (level < max_level && path->nodes[level]) {
5027 if (path->slots[level] + 1 <
5028 btrfs_header_nritems(path->nodes[level])) {
5029 path->slots[level]++;
5032 ret = walk_up_proc(trans, root, path, wc);
5036 if (path->locks[level]) {
5037 btrfs_tree_unlock(path->nodes[level]);
5038 path->locks[level] = 0;
5040 free_extent_buffer(path->nodes[level]);
5041 path->nodes[level] = NULL;
5049 * drop a subvolume tree.
5051 * this function traverses the tree freeing any blocks that are only
5052 * referenced by the tree.
5054 * when a shared tree block is found, this function decreases its
5055 * reference count by one. if update_ref is true, this function
5056 * also makes sure backrefs for the shared block and all lower level
5057 * blocks are properly updated.
5059 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
5061 struct btrfs_path *path;
5062 struct btrfs_trans_handle *trans;
5063 struct btrfs_root *tree_root = root->fs_info->tree_root;
5064 struct btrfs_root_item *root_item = &root->root_item;
5065 struct walk_control *wc;
5066 struct btrfs_key key;
5071 path = btrfs_alloc_path();
5074 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5077 trans = btrfs_start_transaction(tree_root, 1);
5079 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5080 level = btrfs_header_level(root->node);
5081 path->nodes[level] = btrfs_lock_root_node(root);
5082 btrfs_set_lock_blocking(path->nodes[level]);
5083 path->slots[level] = 0;
5084 path->locks[level] = 1;
5085 memset(&wc->update_progress, 0,
5086 sizeof(wc->update_progress));
5088 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
5089 memcpy(&wc->update_progress, &key,
5090 sizeof(wc->update_progress));
5092 level = root_item->drop_level;
5094 path->lowest_level = level;
5095 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5096 path->lowest_level = 0;
5101 btrfs_node_key_to_cpu(path->nodes[level], &key,
5102 path->slots[level]);
5103 WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
5106 * unlock our path; this is safe because only this
5107 * function is allowed to delete this snapshot
5109 btrfs_unlock_up_safe(path, 0);
5111 level = btrfs_header_level(root->node);
5113 btrfs_tree_lock(path->nodes[level]);
5114 btrfs_set_lock_blocking(path->nodes[level]);
5116 ret = btrfs_lookup_extent_info(trans, root,
5117 path->nodes[level]->start,
5118 path->nodes[level]->len,
5122 BUG_ON(wc->refs[level] == 0);
5124 if (level == root_item->drop_level)
5127 btrfs_tree_unlock(path->nodes[level]);
5128 WARN_ON(wc->refs[level] != 1);
5134 wc->shared_level = -1;
5135 wc->stage = DROP_REFERENCE;
5136 wc->update_ref = update_ref;
5140 ret = walk_down_tree(trans, root, path, wc);
5146 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5153 BUG_ON(wc->stage != DROP_REFERENCE);
5157 if (wc->stage == DROP_REFERENCE) {
5159 btrfs_node_key(path->nodes[level],
5160 &root_item->drop_progress,
5161 path->slots[level]);
5162 root_item->drop_level = level;
5165 BUG_ON(wc->level == 0);
5166 if (trans->transaction->in_commit ||
5167 trans->transaction->delayed_refs.flushing) {
5168 ret = btrfs_update_root(trans, tree_root,
5173 btrfs_end_transaction(trans, tree_root);
5174 trans = btrfs_start_transaction(tree_root, 1);
5176 unsigned long update;
5177 update = trans->delayed_ref_updates;
5178 trans->delayed_ref_updates = 0;
5180 btrfs_run_delayed_refs(trans, tree_root,
5184 btrfs_release_path(root, path);
5187 ret = btrfs_del_root(trans, tree_root, &root->root_key);
5190 free_extent_buffer(root->node);
5191 free_extent_buffer(root->commit_root);
5194 btrfs_end_transaction(trans, tree_root);
5196 btrfs_free_path(path);
5201 * drop subtree rooted at tree block 'node'.
5203 * NOTE: this function will unlock and release tree block 'node'
5205 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5206 struct btrfs_root *root,
5207 struct extent_buffer *node,
5208 struct extent_buffer *parent)
5210 struct btrfs_path *path;
5211 struct walk_control *wc;
5217 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5219 path = btrfs_alloc_path();
5222 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5225 btrfs_assert_tree_locked(parent);
5226 parent_level = btrfs_header_level(parent);
5227 extent_buffer_get(parent);
5228 path->nodes[parent_level] = parent;
5229 path->slots[parent_level] = btrfs_header_nritems(parent);
5231 btrfs_assert_tree_locked(node);
5232 level = btrfs_header_level(node);
5233 path->nodes[level] = node;
5234 path->slots[level] = 0;
5235 path->locks[level] = 1;
5237 wc->refs[parent_level] = 1;
5238 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5240 wc->shared_level = -1;
5241 wc->stage = DROP_REFERENCE;
5246 wret = walk_down_tree(trans, root, path, wc);
5252 wret = walk_up_tree(trans, root, path, wc, parent_level);
5260 btrfs_free_path(path);
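/* clamp a readahead window of nr pages so it does not run past last */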
5265 static unsigned long calc_ra(unsigned long start, unsigned long last,
5268 return min(last, start + nr - 1);
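/*
 * read in the pages covering [start, start + len) of the relocation
 * inode and mark them dirty/delalloc so that writeback copies the data
 * to its new location.
 */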
5271 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
5276 unsigned long first_index;
5277 unsigned long last_index;
5280 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5281 struct file_ra_state *ra;
5282 struct btrfs_ordered_extent *ordered;
5283 unsigned int total_read = 0;
5284 unsigned int total_dirty = 0;
5287 ra = kzalloc(sizeof(*ra), GFP_NOFS);
5289 mutex_lock(&inode->i_mutex);
5290 first_index = start >> PAGE_CACHE_SHIFT;
5291 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
5293 /* make sure the dirty trick played by the caller works */
5294 ret = invalidate_inode_pages2_range(inode->i_mapping,
5295 first_index, last_index);
5299 file_ra_state_init(ra, inode->i_mapping);
5301 for (i = first_index; i <= last_index; i++) {
5302 if (total_read % ra->ra_pages == 0) {
5303 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
5304 calc_ra(i, last_index, ra->ra_pages));
5308 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
5310 page = grab_cache_page(inode->i_mapping, i);
5315 if (!PageUptodate(page)) {
5316 btrfs_readpage(NULL, page);
5318 if (!PageUptodate(page)) {
5320 page_cache_release(page);
5325 wait_on_page_writeback(page);
5327 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
5328 page_end = page_start + PAGE_CACHE_SIZE - 1;
5329 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5331 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5333 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5335 page_cache_release(page);
5336 btrfs_start_ordered_extent(inode, ordered, 1);
5337 btrfs_put_ordered_extent(ordered);
5340 set_page_extent_mapped(page);
5342 if (i == first_index)
5343 set_extent_bits(io_tree, page_start, page_end,
5344 EXTENT_BOUNDARY, GFP_NOFS);
5345 btrfs_set_extent_delalloc(inode, page_start, page_end);
5347 set_page_dirty(page);
5350 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5352 page_cache_release(page);
5357 mutex_unlock(&inode->i_mutex);
5358 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
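/*
 * insert a pinned extent mapping so reads of the relocation inode pull
 * data straight from the extent being relocated, then dirty the pages
 * via relocate_inode_pages so they get written out at the new location.
 */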
5362 static noinline int relocate_data_extent(struct inode *reloc_inode,
5363 struct btrfs_key *extent_key,
5366 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5367 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
5368 struct extent_map *em;
5369 u64 start = extent_key->objectid - offset;
5370 u64 end = start + extent_key->offset - 1;
5372 em = alloc_extent_map(GFP_NOFS);
5373 BUG_ON(!em || IS_ERR(em));
5376 em->len = extent_key->offset;
5377 em->block_len = extent_key->offset;
5378 em->block_start = extent_key->objectid;
5379 em->bdev = root->fs_info->fs_devices->latest_bdev;
5380 set_bit(EXTENT_FLAG_PINNED, &em->flags);
5382 /* setup extent map to cheat btrfs_readpage */
5383 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5386 spin_lock(&em_tree->lock);
5387 ret = add_extent_mapping(em_tree, em);
5388 spin_unlock(&em_tree->lock);
5389 if (ret != -EEXIST) {
5390 free_extent_map(em);
5393 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
5395 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5397 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
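/*
 * a ref path records one chain of parent blocks leading from an extent
 * up to the tree root that references it. nodes[] holds the bytenr of
 * the block at each level; node_keys[] and new_nodes[] are scratch space
 * used when merging the relocated path back into the fs tree.
 */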
5400 struct btrfs_ref_path {
5402 u64 nodes[BTRFS_MAX_LEVEL];
5404 u64 root_generation;
5411 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
5412 u64 new_nodes[BTRFS_MAX_LEVEL];
5415 struct disk_extent {
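/*
 * COW-only roots (root, extent, chunk, dev, log and csum trees) are
 * never snapshotted, so their blocks don't carry reference counted
 * backrefs.
 */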
5426 static int is_cowonly_root(u64 root_objectid)
5428 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
5429 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
5430 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
5431 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
5432 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5433 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
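/*
 * advance to the next reference path for an extent. btrfs_first_ref_path
 * starts a fresh walk from the extent itself; btrfs_next_ref_path
 * backtracks from the previous path. each step searches the extent tree
 * for a backref of the current block and climbs one level until a tree
 * root (or the max level) is reached.
 */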
5438 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
5439 struct btrfs_root *extent_root,
5440 struct btrfs_ref_path *ref_path,
5443 struct extent_buffer *leaf;
5444 struct btrfs_path *path;
5445 struct btrfs_extent_ref *ref;
5446 struct btrfs_key key;
5447 struct btrfs_key found_key;
5453 path = btrfs_alloc_path();
5458 ref_path->lowest_level = -1;
5459 ref_path->current_level = -1;
5460 ref_path->shared_level = -1;
5464 level = ref_path->current_level - 1;
5465 while (level >= -1) {
5467 if (level < ref_path->lowest_level)
5471 bytenr = ref_path->nodes[level];
5473 bytenr = ref_path->extent_start;
5474 BUG_ON(bytenr == 0);
5476 parent = ref_path->nodes[level + 1];
5477 ref_path->nodes[level + 1] = 0;
5478 ref_path->current_level = level;
5479 BUG_ON(parent == 0);
5481 key.objectid = bytenr;
5482 key.offset = parent + 1;
5483 key.type = BTRFS_EXTENT_REF_KEY;
5485 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5490 leaf = path->nodes[0];
5491 nritems = btrfs_header_nritems(leaf);
5492 if (path->slots[0] >= nritems) {
5493 ret = btrfs_next_leaf(extent_root, path);
5498 leaf = path->nodes[0];
5501 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5502 if (found_key.objectid == bytenr &&
5503 found_key.type == BTRFS_EXTENT_REF_KEY) {
5504 if (level < ref_path->shared_level)
5505 ref_path->shared_level = level;
5510 btrfs_release_path(extent_root, path);
5513 /* reached lowest level */
5517 level = ref_path->current_level;
5518 while (level < BTRFS_MAX_LEVEL - 1) {
5522 bytenr = ref_path->nodes[level];
5524 bytenr = ref_path->extent_start;
5526 BUG_ON(bytenr == 0);
5528 key.objectid = bytenr;
5530 key.type = BTRFS_EXTENT_REF_KEY;
5532 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5536 leaf = path->nodes[0];
5537 nritems = btrfs_header_nritems(leaf);
5538 if (path->slots[0] >= nritems) {
5539 ret = btrfs_next_leaf(extent_root, path);
5543 /* the extent was freed by someone */
5544 if (ref_path->lowest_level == level)
5546 btrfs_release_path(extent_root, path);
5549 leaf = path->nodes[0];
5552 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5553 if (found_key.objectid != bytenr ||
5554 found_key.type != BTRFS_EXTENT_REF_KEY) {
5555 /* the extent was freed by someone */
5556 if (ref_path->lowest_level == level) {
5560 btrfs_release_path(extent_root, path);
5564 ref = btrfs_item_ptr(leaf, path->slots[0],
5565 struct btrfs_extent_ref);
5566 ref_objectid = btrfs_ref_objectid(leaf, ref);
5567 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5569 level = (int)ref_objectid;
5570 BUG_ON(level >= BTRFS_MAX_LEVEL);
5571 ref_path->lowest_level = level;
5572 ref_path->current_level = level;
5573 ref_path->nodes[level] = bytenr;
5575 WARN_ON(ref_objectid != level);
5578 WARN_ON(level != -1);
5582 if (ref_path->lowest_level == level) {
5583 ref_path->owner_objectid = ref_objectid;
5584 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
5588 * the block is a tree root or the block isn't in a reference counted tree.
5591 if (found_key.objectid == found_key.offset ||
5592 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
5593 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5594 ref_path->root_generation =
5595 btrfs_ref_generation(leaf, ref);
5597 /* special reference from the tree log */
5598 ref_path->nodes[0] = found_key.offset;
5599 ref_path->current_level = 0;
5606 BUG_ON(ref_path->nodes[level] != 0);
5607 ref_path->nodes[level] = found_key.offset;
5608 ref_path->current_level = level;
5611 * the reference was created in the running transaction,
5612 * no need to continue walking up.
5614 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
5615 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5616 ref_path->root_generation =
5617 btrfs_ref_generation(leaf, ref);
5622 btrfs_release_path(extent_root, path);
5625 /* reached max tree level, but no tree root found. */
5628 btrfs_free_path(path);
5632 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
5633 struct btrfs_root *extent_root,
5634 struct btrfs_ref_path *ref_path,
5637 memset(ref_path, 0, sizeof(*ref_path));
5638 ref_path->extent_start = extent_start;
5640 return __next_ref_path(trans, extent_root, ref_path, 1);
5643 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
5644 struct btrfs_root *extent_root,
5645 struct btrfs_ref_path *ref_path)
5647 return __next_ref_path(trans, extent_root, ref_path, 0);
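/*
 * collect the file extents in the relocation inode that cover the extent
 * being relocated and return them as an array of disk_extents describing
 * the new locations. no_fragment means the caller wants the whole range
 * covered by a single extent.
 */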
5650 static noinline int get_new_locations(struct inode *reloc_inode,
5651 struct btrfs_key *extent_key,
5652 u64 offset, int no_fragment,
5653 struct disk_extent **extents,
5656 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5657 struct btrfs_path *path;
5658 struct btrfs_file_extent_item *fi;
5659 struct extent_buffer *leaf;
5660 struct disk_extent *exts = *extents;
5661 struct btrfs_key found_key;
5666 int max = *nr_extents;
5669 WARN_ON(!no_fragment && *extents);
5672 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
5677 path = btrfs_alloc_path();
5680 cur_pos = extent_key->objectid - offset;
5681 last_byte = extent_key->objectid + extent_key->offset;
5682 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
5692 leaf = path->nodes[0];
5693 nritems = btrfs_header_nritems(leaf);
5694 if (path->slots[0] >= nritems) {
5695 ret = btrfs_next_leaf(root, path);
5700 leaf = path->nodes[0];
5703 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5704 if (found_key.offset != cur_pos ||
5705 found_key.type != BTRFS_EXTENT_DATA_KEY ||
5706 found_key.objectid != reloc_inode->i_ino)
5709 fi = btrfs_item_ptr(leaf, path->slots[0],
5710 struct btrfs_file_extent_item);
5711 if (btrfs_file_extent_type(leaf, fi) !=
5712 BTRFS_FILE_EXTENT_REG ||
5713 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
5717 struct disk_extent *old = exts;
5719 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
5720 memcpy(exts, old, sizeof(*exts) * nr);
5721 if (old != *extents)
5725 exts[nr].disk_bytenr =
5726 btrfs_file_extent_disk_bytenr(leaf, fi);
5727 exts[nr].disk_num_bytes =
5728 btrfs_file_extent_disk_num_bytes(leaf, fi);
5729 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
5730 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5731 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
5732 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
5733 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
5734 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
5736 BUG_ON(exts[nr].offset > 0);
5737 BUG_ON(exts[nr].compression || exts[nr].encryption);
5738 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
5740 cur_pos += exts[nr].num_bytes;
5743 if (cur_pos + offset >= last_byte)
5753 BUG_ON(cur_pos + offset > last_byte);
5754 if (cur_pos + offset < last_byte) {
5760 btrfs_free_path(path);
5762 if (exts != *extents)
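/*
 * scan a tree for file extent items that point at the extent being
 * relocated and rewrite them to point at the new extents, locking the
 * affected file range and dropping the stale extent cache as it goes.
 */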
5771 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
5772 struct btrfs_root *root,
5773 struct btrfs_path *path,
5774 struct btrfs_key *extent_key,
5775 struct btrfs_key *leaf_key,
5776 struct btrfs_ref_path *ref_path,
5777 struct disk_extent *new_extents,
5780 struct extent_buffer *leaf;
5781 struct btrfs_file_extent_item *fi;
5782 struct inode *inode = NULL;
5783 struct btrfs_key key;
5788 u64 search_end = (u64)-1;
5791 int extent_locked = 0;
5795 memcpy(&key, leaf_key, sizeof(key));
5796 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5797 if (key.objectid < ref_path->owner_objectid ||
5798 (key.objectid == ref_path->owner_objectid &&
5799 key.type < BTRFS_EXTENT_DATA_KEY)) {
5800 key.objectid = ref_path->owner_objectid;
5801 key.type = BTRFS_EXTENT_DATA_KEY;
5807 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
5811 leaf = path->nodes[0];
5812 nritems = btrfs_header_nritems(leaf);
5814 if (extent_locked && ret > 0) {
5816 * the file extent item was modified by someone
5817 * before the extent got locked.
5819 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5820 lock_end, GFP_NOFS);
5824 if (path->slots[0] >= nritems) {
5825 if (++nr_scaned > 2)
5828 BUG_ON(extent_locked);
5829 ret = btrfs_next_leaf(root, path);
5834 leaf = path->nodes[0];
5835 nritems = btrfs_header_nritems(leaf);
5838 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5840 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5841 if ((key.objectid > ref_path->owner_objectid) ||
5842 (key.objectid == ref_path->owner_objectid &&
5843 key.type > BTRFS_EXTENT_DATA_KEY) ||
5844 key.offset >= search_end)
5848 if (inode && key.objectid != inode->i_ino) {
5849 BUG_ON(extent_locked);
5850 btrfs_release_path(root, path);
5851 mutex_unlock(&inode->i_mutex);
5857 if (key.type != BTRFS_EXTENT_DATA_KEY) {
5862 fi = btrfs_item_ptr(leaf, path->slots[0],
5863 struct btrfs_file_extent_item);
5864 extent_type = btrfs_file_extent_type(leaf, fi);
5865 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
5866 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
5867 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
5868 extent_key->objectid)) {
5874 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5875 ext_offset = btrfs_file_extent_offset(leaf, fi);
5877 if (search_end == (u64)-1) {
5878 search_end = key.offset - ext_offset +
5879 btrfs_file_extent_ram_bytes(leaf, fi);
5882 if (!extent_locked) {
5883 lock_start = key.offset;
5884 lock_end = lock_start + num_bytes - 1;
5886 if (lock_start > key.offset ||
5887 lock_end + 1 < key.offset + num_bytes) {
5888 unlock_extent(&BTRFS_I(inode)->io_tree,
5889 lock_start, lock_end, GFP_NOFS);
5895 btrfs_release_path(root, path);
5897 inode = btrfs_iget_locked(root->fs_info->sb,
5898 key.objectid, root);
5899 if (inode->i_state & I_NEW) {
5900 BTRFS_I(inode)->root = root;
5901 BTRFS_I(inode)->location.objectid =
5903 BTRFS_I(inode)->location.type =
5904 BTRFS_INODE_ITEM_KEY;
5905 BTRFS_I(inode)->location.offset = 0;
5906 btrfs_read_locked_inode(inode);
5907 unlock_new_inode(inode);
5910 * some code calls btrfs_commit_transaction while
5911 * holding the i_mutex, so we can't use mutex_lock
5914 if (is_bad_inode(inode) ||
5915 !mutex_trylock(&inode->i_mutex)) {
5918 key.offset = (u64)-1;
5923 if (!extent_locked) {
5924 struct btrfs_ordered_extent *ordered;
5926 btrfs_release_path(root, path);
5928 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5929 lock_end, GFP_NOFS);
5930 ordered = btrfs_lookup_first_ordered_extent(inode,
5933 ordered->file_offset <= lock_end &&
5934 ordered->file_offset + ordered->len > lock_start) {
5935 unlock_extent(&BTRFS_I(inode)->io_tree,
5936 lock_start, lock_end, GFP_NOFS);
5937 btrfs_start_ordered_extent(inode, ordered, 1);
5938 btrfs_put_ordered_extent(ordered);
5939 key.offset += num_bytes;
5943 btrfs_put_ordered_extent(ordered);
5949 if (nr_extents == 1) {
5950 /* update extent pointer in place */
5951 btrfs_set_file_extent_disk_bytenr(leaf, fi,
5952 new_extents[0].disk_bytenr);
5953 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
5954 new_extents[0].disk_num_bytes);
5955 btrfs_mark_buffer_dirty(leaf);
5957 btrfs_drop_extent_cache(inode, key.offset,
5958 key.offset + num_bytes - 1, 0);
5960 ret = btrfs_inc_extent_ref(trans, root,
5961 new_extents[0].disk_bytenr,
5962 new_extents[0].disk_num_bytes,
5964 root->root_key.objectid,
5969 ret = btrfs_free_extent(trans, root,
5970 extent_key->objectid,
5973 btrfs_header_owner(leaf),
5974 btrfs_header_generation(leaf),
5978 btrfs_release_path(root, path);
5979 key.offset += num_bytes;
5987 * drop the old extent pointer first, then insert the
5988 * new pointers one by one
5990 btrfs_release_path(root, path);
5991 ret = btrfs_drop_extents(trans, root, inode, key.offset,
5992 key.offset + num_bytes,
5993 key.offset, &alloc_hint);
5996 for (i = 0; i < nr_extents; i++) {
5997 if (ext_offset >= new_extents[i].num_bytes) {
5998 ext_offset -= new_extents[i].num_bytes;
6001 extent_len = min(new_extents[i].num_bytes -
6002 ext_offset, num_bytes);
6004 ret = btrfs_insert_empty_item(trans, root,
6009 leaf = path->nodes[0];
6010 fi = btrfs_item_ptr(leaf, path->slots[0],
6011 struct btrfs_file_extent_item);
6012 btrfs_set_file_extent_generation(leaf, fi,
6014 btrfs_set_file_extent_type(leaf, fi,
6015 BTRFS_FILE_EXTENT_REG);
6016 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6017 new_extents[i].disk_bytenr);
6018 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6019 new_extents[i].disk_num_bytes);
6020 btrfs_set_file_extent_ram_bytes(leaf, fi,
6021 new_extents[i].ram_bytes);
6023 btrfs_set_file_extent_compression(leaf, fi,
6024 new_extents[i].compression);
6025 btrfs_set_file_extent_encryption(leaf, fi,
6026 new_extents[i].encryption);
6027 btrfs_set_file_extent_other_encoding(leaf, fi,
6028 new_extents[i].other_encoding);
6030 btrfs_set_file_extent_num_bytes(leaf, fi,
6032 ext_offset += new_extents[i].offset;
6033 btrfs_set_file_extent_offset(leaf, fi,
6035 btrfs_mark_buffer_dirty(leaf);
6037 btrfs_drop_extent_cache(inode, key.offset,
6038 key.offset + extent_len - 1, 0);
6040 ret = btrfs_inc_extent_ref(trans, root,
6041 new_extents[i].disk_bytenr,
6042 new_extents[i].disk_num_bytes,
6044 root->root_key.objectid,
6045 trans->transid, key.objectid);
6047 btrfs_release_path(root, path);
6049 inode_add_bytes(inode, extent_len);
6052 num_bytes -= extent_len;
6053 key.offset += extent_len;
6058 BUG_ON(i >= nr_extents);
6062 if (extent_locked) {
6063 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6064 lock_end, GFP_NOFS);
6068 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
6069 key.offset >= search_end)
6076 btrfs_release_path(root, path);
6078 mutex_unlock(&inode->i_mutex);
6079 if (extent_locked) {
6080 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6081 lock_end, GFP_NOFS);
6088 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
6089 struct btrfs_root *root,
6090 struct extent_buffer *buf, u64 orig_start)
6095 BUG_ON(btrfs_header_generation(buf) != trans->transid);
6096 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6098 level = btrfs_header_level(buf);
6100 struct btrfs_leaf_ref *ref;
6101 struct btrfs_leaf_ref *orig_ref;
6103 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
6107 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
6109 btrfs_free_leaf_ref(root, orig_ref);
6113 ref->nritems = orig_ref->nritems;
6114 memcpy(ref->extents, orig_ref->extents,
6115 sizeof(ref->extents[0]) * ref->nritems);
6117 btrfs_free_leaf_ref(root, orig_ref);
6119 ref->root_gen = trans->transid;
6120 ref->bytenr = buf->start;
6121 ref->owner = btrfs_header_owner(buf);
6122 ref->generation = btrfs_header_generation(buf);
6124 ret = btrfs_add_leaf_ref(root, ref, 0);
6126 btrfs_free_leaf_ref(root, ref);
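/*
 * drop the cached extent mappings in the target root for every file
 * extent referenced by this leaf, forcing the new extent locations to be
 * read back from disk.
 */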
6131 static noinline int invalidate_extent_cache(struct btrfs_root *root,
6132 struct extent_buffer *leaf,
6133 struct btrfs_block_group_cache *group,
6134 struct btrfs_root *target_root)
6136 struct btrfs_key key;
6137 struct inode *inode = NULL;
6138 struct btrfs_file_extent_item *fi;
6140 u64 skip_objectid = 0;
6144 nritems = btrfs_header_nritems(leaf);
6145 for (i = 0; i < nritems; i++) {
6146 btrfs_item_key_to_cpu(leaf, &key, i);
6147 if (key.objectid == skip_objectid ||
6148 key.type != BTRFS_EXTENT_DATA_KEY)
6150 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6151 if (btrfs_file_extent_type(leaf, fi) ==
6152 BTRFS_FILE_EXTENT_INLINE)
6154 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6156 if (!inode || inode->i_ino != key.objectid) {
6158 inode = btrfs_ilookup(target_root->fs_info->sb,
6159 key.objectid, target_root, 1);
6162 skip_objectid = key.objectid;
6165 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6167 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6168 key.offset + num_bytes - 1, GFP_NOFS);
6169 btrfs_drop_extent_cache(inode, key.offset,
6170 key.offset + num_bytes - 1, 1);
6171 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6172 key.offset + num_bytes - 1, GFP_NOFS);
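/*
 * rewrite each file extent item in the leaf that points into the block
 * group being relocated so it uses the new copy of the data, keeping the
 * cached leaf ref in sync.
 */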
6179 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
6180 struct btrfs_root *root,
6181 struct extent_buffer *leaf,
6182 struct btrfs_block_group_cache *group,
6183 struct inode *reloc_inode)
6185 struct btrfs_key key;
6186 struct btrfs_key extent_key;
6187 struct btrfs_file_extent_item *fi;
6188 struct btrfs_leaf_ref *ref;
6189 struct disk_extent *new_extent;
6198 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
6199 BUG_ON(!new_extent);
6201 ref = btrfs_lookup_leaf_ref(root, leaf->start);
6205 nritems = btrfs_header_nritems(leaf);
6206 for (i = 0; i < nritems; i++) {
6207 btrfs_item_key_to_cpu(leaf, &key, i);
6208 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
6210 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6211 if (btrfs_file_extent_type(leaf, fi) ==
6212 BTRFS_FILE_EXTENT_INLINE)
6214 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6215 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6220 if (bytenr >= group->key.objectid + group->key.offset ||
6221 bytenr + num_bytes <= group->key.objectid)
6224 extent_key.objectid = bytenr;
6225 extent_key.offset = num_bytes;
6226 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
6228 ret = get_new_locations(reloc_inode, &extent_key,
6229 group->key.objectid, 1,
6230 &new_extent, &nr_extent);
6235 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
6236 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
6237 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
6238 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
6240 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6241 new_extent->disk_bytenr);
6242 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6243 new_extent->disk_num_bytes);
6244 btrfs_mark_buffer_dirty(leaf);
6246 ret = btrfs_inc_extent_ref(trans, root,
6247 new_extent->disk_bytenr,
6248 new_extent->disk_num_bytes,
6250 root->root_key.objectid,
6251 trans->transid, key.objectid);
6254 ret = btrfs_free_extent(trans, root,
6255 bytenr, num_bytes, leaf->start,
6256 btrfs_header_owner(leaf),
6257 btrfs_header_generation(leaf),
6263 BUG_ON(ext_index + 1 != ref->nritems);
6264 btrfs_free_leaf_ref(root, ref);
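/*
 * detach the reloc root from a subvol root and queue it for deletion,
 * recording the reloc tree's final bytenr and level in its root item.
 */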
6268 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
6269 struct btrfs_root *root)
6271 struct btrfs_root *reloc_root;
6274 if (root->reloc_root) {
6275 reloc_root = root->reloc_root;
6276 root->reloc_root = NULL;
6277 list_add(&reloc_root->dead_list,
6278 &root->fs_info->dead_reloc_roots);
6280 btrfs_set_root_bytenr(&reloc_root->root_item,
6281 reloc_root->node->start);
6282 btrfs_set_root_level(&reloc_root->root_item,
6283 btrfs_header_level(reloc_root->node));
6284 memset(&reloc_root->root_item.drop_progress, 0,
6285 sizeof(struct btrfs_disk_key));
6286 reloc_root->root_item.drop_level = 0;
6288 ret = btrfs_update_root(trans, root->fs_info->tree_root,
6289 &reloc_root->root_key,
6290 &reloc_root->root_item);
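/*
 * drop every reloc root queued on fs_info->dead_reloc_roots:
 * snapshot-drop the tree, delete its root item and prune the leftover
 * leaf refs.
 */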
6296 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
6298 struct btrfs_trans_handle *trans;
6299 struct btrfs_root *reloc_root;
6300 struct btrfs_root *prev_root = NULL;
6301 struct list_head dead_roots;
6305 INIT_LIST_HEAD(&dead_roots);
6306 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
6308 while (!list_empty(&dead_roots)) {
6309 reloc_root = list_entry(dead_roots.prev,
6310 struct btrfs_root, dead_list);
6311 list_del_init(&reloc_root->dead_list);
6313 BUG_ON(reloc_root->commit_root != NULL);
6315 trans = btrfs_join_transaction(root, 1);
6318 mutex_lock(&root->fs_info->drop_mutex);
6319 ret = btrfs_drop_snapshot(trans, reloc_root);
6322 mutex_unlock(&root->fs_info->drop_mutex);
6324 nr = trans->blocks_used;
6325 ret = btrfs_end_transaction(trans, root);
6327 btrfs_btree_balance_dirty(root, nr);
6330 free_extent_buffer(reloc_root->node);
6332 ret = btrfs_del_root(trans, root->fs_info->tree_root,
6333 &reloc_root->root_key);
6335 mutex_unlock(&root->fs_info->drop_mutex);
6337 nr = trans->blocks_used;
6338 ret = btrfs_end_transaction(trans, root);
6340 btrfs_btree_balance_dirty(root, nr);
6343 prev_root = reloc_root;
6346 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
6352 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
6354 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
6358 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
6360 struct btrfs_root *reloc_root;
6361 struct btrfs_trans_handle *trans;
6362 struct btrfs_key location;
6366 mutex_lock(&root->fs_info->tree_reloc_mutex);
6367 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
6369 found = !list_empty(&root->fs_info->dead_reloc_roots);
6370 mutex_unlock(&root->fs_info->tree_reloc_mutex);
6373 trans = btrfs_start_transaction(root, 1);
6375 ret = btrfs_commit_transaction(trans, root);
6379 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6380 location.offset = (u64)-1;
6381 location.type = BTRFS_ROOT_ITEM_KEY;
6383 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
6384 BUG_ON(!reloc_root);
6385 btrfs_orphan_cleanup(reloc_root);
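/*
 * create the reloc tree for a subvol if it doesn't have one yet: copy
 * the committed root, insert a root item for it under
 * BTRFS_TREE_RELOC_OBJECTID and hang it off root->reloc_root.
 */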
6389 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
6390 struct btrfs_root *root)
6392 struct btrfs_root *reloc_root;
6393 struct extent_buffer *eb;
6394 struct btrfs_root_item *root_item;
6395 struct btrfs_key root_key;
6398 BUG_ON(!root->ref_cows);
6399 if (root->reloc_root)
6402 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
6405 ret = btrfs_copy_root(trans, root, root->commit_root,
6406 &eb, BTRFS_TREE_RELOC_OBJECTID);
6409 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
6410 root_key.offset = root->root_key.objectid;
6411 root_key.type = BTRFS_ROOT_ITEM_KEY;
6413 memcpy(root_item, &root->root_item, sizeof(*root_item));
6414 btrfs_set_root_refs(root_item, 0);
6415 btrfs_set_root_bytenr(root_item, eb->start);
6416 btrfs_set_root_level(root_item, btrfs_header_level(eb));
6417 btrfs_set_root_generation(root_item, trans->transid);
6419 btrfs_tree_unlock(eb);
6420 free_extent_buffer(eb);
6422 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
6423 &root_key, root_item);
6427 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
6429 BUG_ON(!reloc_root);
6430 reloc_root->last_trans = trans->transid;
6431 reloc_root->commit_root = NULL;
6432 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
6434 root->reloc_root = reloc_root;
6439 * Core function of space balance.
6441 * The idea is to use reloc trees to relocate tree blocks in reference
6442 * counted roots. There is one reloc tree for each subvol, and all
6443 * reloc trees share the same root key objectid. Reloc trees are snapshots
6444 * of the latest committed roots of subvols (root->commit_root).
6446 * To relocate a tree block referenced by a subvol, there are two steps.
6447 * COW the block through the subvol's reloc tree, then update the block
6448 * pointer in the subvol to point to the new block. Since all reloc trees
6449 * share the same root key objectid, special handling for tree blocks owned
6450 * by them is easy. Once a tree block has been COWed in one reloc tree,
6451 * we can use the resulting new block directly when the same block is
6452 * required to COW again through other reloc trees. In this way, relocated
6453 * tree blocks are shared between reloc trees, so they are also shared
6456 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
6457 struct btrfs_root *root,
6458 struct btrfs_path *path,
6459 struct btrfs_key *first_key,
6460 struct btrfs_ref_path *ref_path,
6461 struct btrfs_block_group_cache *group,
6462 struct inode *reloc_inode)
6464 struct btrfs_root *reloc_root;
6465 struct extent_buffer *eb = NULL;
6466 struct btrfs_key *keys;
6470 int lowest_level = 0;
6473 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
6474 lowest_level = ref_path->owner_objectid;
6476 if (!root->ref_cows) {
6477 path->lowest_level = lowest_level;
6478 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
6480 path->lowest_level = 0;
6481 btrfs_release_path(root, path);
6485 mutex_lock(&root->fs_info->tree_reloc_mutex);
6486 ret = init_reloc_tree(trans, root);
6488 reloc_root = root->reloc_root;
6490 shared_level = ref_path->shared_level;
6491 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
6493 keys = ref_path->node_keys;
6494 nodes = ref_path->new_nodes;
6495 memset(&keys[shared_level + 1], 0,
6496 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
6497 memset(&nodes[shared_level + 1], 0,
6498 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
6500 if (nodes[lowest_level] == 0) {
6501 path->lowest_level = lowest_level;
6502 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6505 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
6506 eb = path->nodes[level];
6507 if (!eb || eb == reloc_root->node)
6509 nodes[level] = eb->start;
6511 btrfs_item_key_to_cpu(eb, &keys[level], 0);
6513 btrfs_node_key_to_cpu(eb, &keys[level], 0);
6516 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6517 eb = path->nodes[0];
6518 ret = replace_extents_in_leaf(trans, reloc_root, eb,
6519 group, reloc_inode);
6522 btrfs_release_path(reloc_root, path);
6524 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
6530 * replace tree blocks in the fs tree with tree blocks in
6533 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
6536 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6537 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6540 extent_buffer_get(path->nodes[0]);
6541 eb = path->nodes[0];
6542 btrfs_release_path(reloc_root, path);
6543 ret = invalidate_extent_cache(reloc_root, eb, group, root);
6545 free_extent_buffer(eb);
6548 mutex_unlock(&root->fs_info->tree_reloc_mutex);
6549 path->lowest_level = 0;
6553 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
6554 struct btrfs_root *root,
6555 struct btrfs_path *path,
6556 struct btrfs_key *first_key,
6557 struct btrfs_ref_path *ref_path)
6561 ret = relocate_one_path(trans, root, path, first_key,
6562 ref_path, NULL, NULL);
6568 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
6569 struct btrfs_root *extent_root,
6570 struct btrfs_path *path,
6571 struct btrfs_key *extent_key)
6575 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
6578 ret = btrfs_del_item(trans, extent_root, path);
6580 btrfs_release_path(extent_root, path);
6584 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
6585 struct btrfs_ref_path *ref_path)
6587 struct btrfs_key root_key;
6589 root_key.objectid = ref_path->root_objectid;
6590 root_key.type = BTRFS_ROOT_ITEM_KEY;
6591 if (is_cowonly_root(ref_path->root_objectid))
6592 root_key.offset = 0;
6594 root_key.offset = (u64)-1;
6596 return btrfs_read_fs_root_no_name(fs_info, &root_key);
6599 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
6600 struct btrfs_path *path,
6601 struct btrfs_key *extent_key,
6602 struct btrfs_block_group_cache *group,
6603 struct inode *reloc_inode, int pass)
6605 struct btrfs_trans_handle *trans;
6606 struct btrfs_root *found_root;
6607 struct btrfs_ref_path *ref_path = NULL;
6608 struct disk_extent *new_extents = NULL;
6613 struct btrfs_key first_key;
6617 trans = btrfs_start_transaction(extent_root, 1);
6620 if (extent_key->objectid == 0) {
6621 ret = del_extent_zero(trans, extent_root, path, extent_key);
6625 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
6631 for (loops = 0; ; loops++) {
6633 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
6634 extent_key->objectid);
6636 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
6643 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6644 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
6647 found_root = read_ref_root(extent_root->fs_info, ref_path);
6648 BUG_ON(!found_root);
6650 * for reference counted trees, only process reference paths
6651 * rooted at the latest committed root.
6653 if (found_root->ref_cows &&
6654 ref_path->root_generation != found_root->root_key.offset)
6657 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6660 * copy data extents to new locations
6662 u64 group_start = group->key.objectid;
6663 ret = relocate_data_extent(reloc_inode,
6672 level = ref_path->owner_objectid;
6675 if (prev_block != ref_path->nodes[level]) {
6676 struct extent_buffer *eb;
6677 u64 block_start = ref_path->nodes[level];
6678 u64 block_size = btrfs_level_size(found_root, level);
6680 eb = read_tree_block(found_root, block_start,
6682 btrfs_tree_lock(eb);
6683 BUG_ON(level != btrfs_header_level(eb));
6686 btrfs_item_key_to_cpu(eb, &first_key, 0);
6688 btrfs_node_key_to_cpu(eb, &first_key, 0);
6690 btrfs_tree_unlock(eb);
6691 free_extent_buffer(eb);
6692 prev_block = block_start;
6695 mutex_lock(&extent_root->fs_info->trans_mutex);
6696 btrfs_record_root_in_trans(found_root);
6697 mutex_unlock(&extent_root->fs_info->trans_mutex);
6698 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6700 * try to update data extent references while
6701 * keeping metadata shared between snapshots.
6704 ret = relocate_one_path(trans, found_root,
6705 path, &first_key, ref_path,
6706 group, reloc_inode);
6712 * use fallback method to process the remaining references.
6716 u64 group_start = group->key.objectid;
6717 new_extents = kmalloc(sizeof(*new_extents),
6720 ret = get_new_locations(reloc_inode,
6728 ret = replace_one_extent(trans, found_root,
6730 &first_key, ref_path,
6731 new_extents, nr_extents);
6733 ret = relocate_tree_block(trans, found_root, path,
6734 &first_key, ref_path);
6741 btrfs_end_transaction(trans, extent_root);
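/*
 * pick the raid profile for chunks allocated while a block group is
 * being emptied: with a single rw device, mirroring degrades to dup and
 * raid0 to single; with several devices, dup is promoted to raid1 and
 * single device chunks to raid0.
 */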
6748 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6751 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6752 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6754 num_devices = root->fs_info->fs_devices->rw_devices;
6755 if (num_devices == 1) {
6756 stripped |= BTRFS_BLOCK_GROUP_DUP;
6757 stripped = flags & ~stripped;
6759 /* turn raid0 into single device chunks */
6760 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6763 /* turn mirroring into duplication */
6764 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6765 BTRFS_BLOCK_GROUP_RAID10))
6766 return stripped | BTRFS_BLOCK_GROUP_DUP;
6769 /* they already had raid on here, just return */
6770 if (flags & stripped)
6773 stripped |= BTRFS_BLOCK_GROUP_DUP;
6774 stripped = flags & ~stripped;
6776 /* switch duplicated blocks to raid1 */
6777 if (flags & BTRFS_BLOCK_GROUP_DUP)
6778 return stripped | BTRFS_BLOCK_GROUP_RAID1;
6780 /* turn single device chunks into raid0 */
6781 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6786 static int __alloc_chunk_for_shrink(struct btrfs_root *root,
6787 struct btrfs_block_group_cache *shrink_block_group,
6790 struct btrfs_trans_handle *trans;
6791 u64 new_alloc_flags;
6794 spin_lock(&shrink_block_group->lock);
6795 if (btrfs_block_group_used(&shrink_block_group->item) +
6796 shrink_block_group->reserved > 0) {
6797 spin_unlock(&shrink_block_group->lock);
6799 trans = btrfs_start_transaction(root, 1);
6800 spin_lock(&shrink_block_group->lock);
6802 new_alloc_flags = update_block_group_flags(root,
6803 shrink_block_group->flags);
6804 if (new_alloc_flags != shrink_block_group->flags) {
6806 btrfs_block_group_used(&shrink_block_group->item);
6808 calc = shrink_block_group->key.offset;
6810 spin_unlock(&shrink_block_group->lock);
6812 do_chunk_alloc(trans, root->fs_info->extent_root,
6813 calc + 2 * 1024 * 1024, new_alloc_flags, force);
6815 btrfs_end_transaction(trans, root);
6817 spin_unlock(&shrink_block_group->lock);
6822 int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
6823 struct btrfs_block_group_cache *group)
6826 __alloc_chunk_for_shrink(root, group, 1);
6827 set_block_group_readonly(group);
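/*
 * create the bare inode item backing the relocation inode: a regular
 * 0600 file of the given size with compression disabled.
 */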
6832 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
6833 struct btrfs_root *root,
6834 u64 objectid, u64 size)
6836 struct btrfs_path *path;
6837 struct btrfs_inode_item *item;
6838 struct extent_buffer *leaf;
6841 path = btrfs_alloc_path();
6845 path->leave_spinning = 1;
6846 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
6850 leaf = path->nodes[0];
6851 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
6852 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
6853 btrfs_set_inode_generation(leaf, item, 1);
6854 btrfs_set_inode_size(leaf, item, size);
6855 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
6856 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
6857 btrfs_mark_buffer_dirty(leaf);
6858 btrfs_release_path(root, path);
6860 btrfs_free_path(path);
6864 static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
6865 struct btrfs_block_group_cache *group)
6867 struct inode *inode = NULL;
6868 struct btrfs_trans_handle *trans;
6869 struct btrfs_root *root;
6870 struct btrfs_key root_key;
6871 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
6874 root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6875 root_key.type = BTRFS_ROOT_ITEM_KEY;
6876 root_key.offset = (u64)-1;
6877 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
6879 return ERR_CAST(root);
6881 trans = btrfs_start_transaction(root, 1);
6884 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
6888 err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
6891 err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
6892 group->key.offset, 0, group->key.offset,
6896 inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
6897 if (inode->i_state & I_NEW) {
6898 BTRFS_I(inode)->root = root;
6899 BTRFS_I(inode)->location.objectid = objectid;
6900 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
6901 BTRFS_I(inode)->location.offset = 0;
6902 btrfs_read_locked_inode(inode);
6903 unlock_new_inode(inode);
6904 BUG_ON(is_bad_inode(inode));
6908 BTRFS_I(inode)->index_cnt = group->key.objectid;
6910 err = btrfs_orphan_add(trans, inode);
6912 btrfs_end_transaction(trans, root);
6916 inode = ERR_PTR(err);
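/*
 * checksums for relocated data were computed at the old disk location;
 * clone them from the csum tree and rebase each sector sum onto the
 * ordered extent at the new location.
 */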
6921 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
6924 struct btrfs_ordered_sum *sums;
6925 struct btrfs_sector_sum *sector_sum;
6926 struct btrfs_ordered_extent *ordered;
6927 struct btrfs_root *root = BTRFS_I(inode)->root;
6928 struct list_head list;
6933 INIT_LIST_HEAD(&list);
6935 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
6936 BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
6938 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
6939 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
6940 disk_bytenr + len - 1, &list);
6942 while (!list_empty(&list)) {
6943 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
6944 list_del_init(&sums->list);
6946 sector_sum = sums->sums;
6947 sums->bytenr = ordered->start;
6950 while (offset < sums->len) {
6951 sector_sum->bytenr += ordered->start - disk_bytenr;
6953 offset += root->sectorsize;
6956 btrfs_add_ordered_sum(inode, ordered, sums);
6958 btrfs_put_ordered_extent(ordered);
6962 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
6964 struct btrfs_trans_handle *trans;
6965 struct btrfs_path *path;
6966 struct btrfs_fs_info *info = root->fs_info;
6967 struct extent_buffer *leaf;
6968 struct inode *reloc_inode;
6969 struct btrfs_block_group_cache *block_group;
6970 struct btrfs_key key;
6979 root = root->fs_info->extent_root;
6981 block_group = btrfs_lookup_block_group(info, group_start);
6982 BUG_ON(!block_group);
6984 printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
6985 (unsigned long long)block_group->key.objectid,
6986 (unsigned long long)block_group->flags);
6988 path = btrfs_alloc_path();
6991 reloc_inode = create_reloc_inode(info, block_group);
6992 BUG_ON(IS_ERR(reloc_inode));
6994 __alloc_chunk_for_shrink(root, block_group, 1);
6995 set_block_group_readonly(block_group);
6997 btrfs_start_delalloc_inodes(info->tree_root);
6998 btrfs_wait_ordered_extents(info->tree_root, 0);
7003 key.objectid = block_group->key.objectid;
7006 cur_byte = key.objectid;
7008 trans = btrfs_start_transaction(info->tree_root, 1);
7009 btrfs_commit_transaction(trans, info->tree_root);
7011 mutex_lock(&root->fs_info->cleaner_mutex);
7012 btrfs_clean_old_snapshots(info->tree_root);
7013 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
7014 mutex_unlock(&root->fs_info->cleaner_mutex);
7016 trans = btrfs_start_transaction(info->tree_root, 1);
7017 btrfs_commit_transaction(trans, info->tree_root);
7020 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7024 leaf = path->nodes[0];
7025 nritems = btrfs_header_nritems(leaf);
7026 if (path->slots[0] >= nritems) {
7027 ret = btrfs_next_leaf(root, path);
7034 leaf = path->nodes[0];
7035 nritems = btrfs_header_nritems(leaf);
7038 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7040 if (key.objectid >= block_group->key.objectid +
7041 block_group->key.offset)
7044 if (progress && need_resched()) {
7045 btrfs_release_path(root, path);
7052 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
7053 key.objectid + key.offset <= cur_byte) {
7059 cur_byte = key.objectid + key.offset;
7060 btrfs_release_path(root, path);
7062 __alloc_chunk_for_shrink(root, block_group, 0);
7063 ret = relocate_one_extent(root, path, &key, block_group,
7069 key.objectid = cur_byte;
7074 btrfs_release_path(root, path);
7077 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
7078 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
7081 if (total_found > 0) {
7082 printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
7083 (unsigned long long)total_found, pass);
7085 if (total_found == skipped && pass > 2) {
7087 reloc_inode = create_reloc_inode(info, block_group);
7093 /* delete reloc_inode */
7096 /* unpin extents in this range */
7097 trans = btrfs_start_transaction(info->tree_root, 1);
7098 btrfs_commit_transaction(trans, info->tree_root);
7100 spin_lock(&block_group->lock);
7101 WARN_ON(block_group->pinned > 0);
7102 WARN_ON(block_group->reserved > 0);
7103 WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
7104 spin_unlock(&block_group->lock);
7105 btrfs_put_block_group(block_group);
7108 btrfs_free_path(path);
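/*
 * position the path at the first block group item at or after *key.
 */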
7113 static int find_first_block_group(struct btrfs_root *root,
7114 struct btrfs_path *path, struct btrfs_key *key)
7117 struct btrfs_key found_key;
7118 struct extent_buffer *leaf;
7121 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7126 slot = path->slots[0];
7127 leaf = path->nodes[0];
7128 if (slot >= btrfs_header_nritems(leaf)) {
7129 ret = btrfs_next_leaf(root, path);
7136 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7138 if (found_key.objectid >= key->objectid &&
7139 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7150 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7152 struct btrfs_block_group_cache *block_group;
7153 struct btrfs_space_info *space_info;
7156 spin_lock(&info->block_group_cache_lock);
7157 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7158 block_group = rb_entry(n, struct btrfs_block_group_cache,
7160 rb_erase(&block_group->cache_node,
7161 &info->block_group_cache_tree);
7162 spin_unlock(&info->block_group_cache_lock);
7164 down_write(&block_group->space_info->groups_sem);
7165 list_del(&block_group->list);
7166 up_write(&block_group->space_info->groups_sem);
7168 if (block_group->cached == BTRFS_CACHE_STARTED)
7169 wait_event(block_group->caching_q,
7170 block_group_cache_done(block_group));
7172 btrfs_remove_free_space_cache(block_group);
7174 WARN_ON(atomic_read(&block_group->count) != 1);
7177 spin_lock(&info->block_group_cache_lock);
7179 spin_unlock(&info->block_group_cache_lock);
7181 /* now that all the block groups are freed, go through and
7182 * free all the space_info structs. This is only called during
7183 * the final stages of unmount, and so we know nobody is
7184 * using them. We call synchronize_rcu() once before we start,
7185 * just to be on the safe side.
7189 while (!list_empty(&info->space_info)) {
7190 space_info = list_entry(info->space_info.next,
7191 struct btrfs_space_info,
7194 list_del(&space_info->list);
7200 int btrfs_read_block_groups(struct btrfs_root *root)
7202 struct btrfs_path *path;
7204 struct btrfs_block_group_cache *cache;
7205 struct btrfs_fs_info *info = root->fs_info;
7206 struct btrfs_space_info *space_info;
7207 struct btrfs_key key;
7208 struct btrfs_key found_key;
7209 struct extent_buffer *leaf;
7211 root = info->extent_root;
7214 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7215 path = btrfs_alloc_path();
7220 ret = find_first_block_group(root, path, &key);
7228 leaf = path->nodes[0];
7229 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7230 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7236 atomic_set(&cache->count, 1);
7237 spin_lock_init(&cache->lock);
7238 spin_lock_init(&cache->tree_lock);
7239 cache->fs_info = info;
7240 init_waitqueue_head(&cache->caching_q);
7241 INIT_LIST_HEAD(&cache->list);
7242 INIT_LIST_HEAD(&cache->cluster_list);
7245 * we only want to have 32k of ram per block group for keeping
7246 * track of free space, and if we pass 1/2 of that we want to
7247 * start converting things over to using bitmaps
7249 cache->extents_thresh = ((1024 * 32) / 2) /
7250 sizeof(struct btrfs_free_space);

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		/* advance the search key past the group we just read */
		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		remove_sb_from_cache(root, cache);

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->cached = BTRFS_CACHE_FINISHED;
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
		}
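		/*
		 * in the remaining case (partially used), cached stays 0
		 * (BTRFS_CACHE_NO from the kzalloc above) and the free space
		 * is discovered lazily by the async caching work the first
		 * time the allocator looks at this group.  Illustrative
		 * numbers only: a 1GB group with 256MB used matches neither
		 * branch here, so it is cached on demand.
		 */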

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		down_write(&space_info->groups_sem);
		list_add_tail(&cache->list, &space_info->block_groups);
		up_write(&space_info->groups_sem);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_readonly(cache);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	/* force the next tree-log sync to fall back to a full commit */
	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;

	/*
	 * we only want to have 32k of ram per block group for keeping track
	 * of free space, and if we pass 1/2 of that we want to start
	 * converting things over to using bitmaps
	 */
	cache->extents_thresh = ((1024 * 32) / 2) /
		sizeof(struct btrfs_free_space);
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	spin_lock_init(&cache->tree_lock);
	init_waitqueue_head(&cache->caching_q);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	/* a brand new block group never needs the caching work */
	cache->cached = BTRFS_CACHE_FINISHED;
	remove_sb_from_cache(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);
	down_write(&cache->space_info->groups_sem);
	list_add_tail(&cache->list, &cache->space_info->block_groups);
	up_write(&cache->space_info->groups_sem);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
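
/*
 * usage sketch (an assumption pieced together from the chunk allocation
 * path in volumes.c of this era, not code in this file): after a new chunk
 * mapping is created, the allocator materializes the matching block group
 * with something like
 *
 *	ret = btrfs_make_block_group(trans, extent_root, 0, type,
 *				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *				     chunk_offset, chunk_size);
 *	BUG_ON(ret);
 *
 * passing 0 for bytes_used because a brand new chunk starts entirely free.
 */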

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_key key;
	int ret;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	memcpy(&key, &block_group->key, sizeof(key));

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);
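	/*
	 * returning a cluster hands its reserved window of space back to the
	 * owning block group's free space cache, so any bytes this group had
	 * parked in the data or metadata cluster become plain free space
	 * again before the group is torn down
	 */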

	path = btrfs_alloc_path();
	BUG_ON(!path);

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_event(block_group->caching_q,
			   block_group_cache_done(block_group));

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	spin_unlock(&block_group->space_info->lock);

	btrfs_clear_space_info_full(root->fs_info);

	/* once for our lookup reference, once for the cache's own reference */
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
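
/*
 * usage sketch (an assumption from how chunk removal fits together, not
 * code in this file): the expected caller relocates the group's data,
 * marks the group read-only, and only then deletes the on-disk item, e.g.
 *
 *	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
 *	BUG_ON(ret);
 *
 * the BUG_ON(!block_group->ro) at the top enforces that ordering.
 */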