/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	if (path)
		path->reada = 1;
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (p->nodes[i] && p->locks[i])
			btrfs_set_lock_blocking(p->nodes[i]);
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held)
		btrfs_set_lock_blocking(held);
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i])
			btrfs_clear_lock_blocking(p->nodes[i]);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking(held);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	btrfs_release_path(NULL, p);
	kmem_cache_free(btrfs_path_cachep, p);
}
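/*
 * Illustrative usage sketch: the usual lifetime of a path as callers
 * elsewhere in btrfs use it -- allocate, search, read the leaf, free.
 * The key values here are made up for the example and error handling is
 * trimmed to the essentials; given a struct btrfs_root *root:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	key.objectid = 256;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *		int slot = path->slots[0];
 *		read the item at (leaf, slot) here
 *	}
 *	btrfs_free_path(path);
 */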
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock(p->nodes[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;
	spin_lock(&root->node_lock);
	eb = root->node;
	extent_buffer_get(eb);
	spin_unlock(&root->node_lock);
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);

		spin_lock(&root->node_lock);
		if (eb == root->node) {
			spin_unlock(&root->node_lock);
			break;
		}
		spin_unlock(&root->node_lock);

		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	u32 nritems;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret);

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret);
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret);
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret);
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			BUG_ON(ret);
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret);
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret);
		}
		clean_tree_block(trans, root, buf);
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	update_ref_for_cow(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		spin_lock(&root->node_lock);
		root->node = cow;
		extent_buffer_get(cow);
		spin_unlock(&root->node_lock);

		btrfs_free_extent(trans, root, buf->start, buf->len,
				  parent_start, root->root_key.objectid,
				  level, 0);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_extent(trans, root, buf->start, buf->len,
				  parent_start, root->root_key.objectid,
				  level, 0);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);
	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
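/*
 * Worked example (the numbers are made up for illustration): with a 4K
 * blocksize, blocknr == 1048576 and other == 1069056 gives
 * other - (blocknr + blocksize) == 16384, which is under the 32768 byte
 * threshold, so close_blocks() returns 1.  At other == 1114112 the gap is
 * 61440 bytes and it returns 0.
 */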
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	if (k1.objectid > k2->objectid)
		return 1;
	if (k1.objectid < k2->objectid)
		return -1;
	if (k1.type > k2->type)
		return 1;
	if (k1.type < k2->type)
		return -1;
	if (k1.offset > k2->offset)
		return 1;
	if (k1.offset < k2->offset)
		return -1;
	return 0;
}
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
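/*
 * Example ordering (illustrative, with made-up offsets): keys sort by
 * objectid, then type, then offset, so for instance
 *
 *	(256, BTRFS_INODE_ITEM_KEY, 0)  <  (256, BTRFS_DIR_ITEM_KEY, 123)
 *	(256, BTRFS_DIR_ITEM_KEY, 123)  <  (257, BTRFS_INODE_ITEM_KEY, 0)
 *
 * because BTRFS_INODE_ITEM_KEY (1) is smaller than BTRFS_DIR_ITEM_KEY (84),
 * and any key with a smaller objectid sorts before one with a larger
 * objectid regardless of type and offset.
 */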
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		if (!parent->map_token) {
			map_extent_buffer(parent,
					btrfs_node_key_ptr_offset(i),
					sizeof(struct btrfs_key_ptr),
					&parent->map_token, &parent->kaddr,
					&parent->map_start, &parent->map_len,
					KM_USER1);
		}
		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}
		if (parent->map_token) {
			unmap_extent_buffer(parent, parent->map_token,
					    KM_USER1);
			parent->map_token = NULL;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	if (parent->map_token) {
		unmap_extent_buffer(parent, parent->map_token,
				    KM_USER1);
		parent->map_token = NULL;
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
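/*
 * Worked example (the item sizes are made up for illustration): item headers
 * grow from the front of the leaf and item data grows back from
 * BTRFS_LEAF_DATA_SIZE(root).  With two items whose data is 100 and 200
 * bytes, item 0's data starts at BTRFS_LEAF_DATA_SIZE(root) - 100 and item
 * 1's at BTRFS_LEAF_DATA_SIZE(root) - 300, so leaf_data_end() returns
 * BTRFS_LEAF_DATA_SIZE(root) - 300; with no items it returns the full data
 * size.
 */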
/*
 * extra debugging checks to make sure all the items in a node are
 * well formed and in the proper order
 */
static int check_node(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *parent = NULL;
	struct extent_buffer *node = path->nodes[level];
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key node_key;
	int parent_slot;
	int slot;
	struct btrfs_key cpukey;
	u32 nritems = btrfs_header_nritems(node);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	slot = path->slots[level];
	BUG_ON(nritems == 0);
	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_node_key(node, &node_key, 0);
		BUG_ON(memcmp(&parent_key, &node_key,
			      sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(node));
	}
	BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != 0) {
		btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
	}
	if (slot < nritems - 1) {
		btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
		btrfs_node_key(node, &node_key, slot);
		BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
	}
	return 0;
}
/*
 * extra checking to make sure all the items in a leaf are
 * well formed and in the proper order
 */
static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
		      int level)
{
	struct extent_buffer *leaf = path->nodes[level];
	struct extent_buffer *parent = NULL;
	int parent_slot;
	struct btrfs_key cpukey;
	struct btrfs_disk_key parent_key;
	struct btrfs_disk_key leaf_key;
	int slot = path->slots[0];

	u32 nritems = btrfs_header_nritems(leaf);

	if (path->nodes[level + 1])
		parent = path->nodes[level + 1];

	if (nritems == 0)
		return 0;

	if (parent) {
		parent_slot = path->slots[level + 1];
		btrfs_node_key(parent, &parent_key, parent_slot);
		btrfs_item_key(leaf, &leaf_key, 0);

		BUG_ON(memcmp(&parent_key, &leaf_key,
		       sizeof(struct btrfs_disk_key)));
		BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
		       btrfs_header_bytenr(leaf));
	}
	if (slot != 0 && slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
		if (comp_keys(&leaf_key, &cpukey) <= 0) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad key\n", slot);
			BUG_ON(1);
		}
		if (btrfs_item_offset_nr(leaf, slot - 1) !=
		       btrfs_item_end_nr(leaf, slot)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	if (slot < nritems - 1) {
		btrfs_item_key(leaf, &leaf_key, slot);
		btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
		BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
		if (btrfs_item_offset_nr(leaf, slot) !=
			btrfs_item_end_nr(leaf, slot + 1)) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d offset bad\n", slot);
			BUG_ON(1);
		}
	}
	BUG_ON(btrfs_item_offset_nr(leaf, 0) +
	       btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
	return 0;
}
static noinline int check_block(struct btrfs_root *root,
				struct btrfs_path *path, int level)
{
	return 0;
	if (level == 0)
		return check_leaf(root, path, level);
	return check_node(root, path, level);
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *map_token = NULL;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!map_token || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {
			if (map_token) {
				unmap_extent_buffer(eb, map_token, KM_USER0);
				map_token = NULL;
			}

			err = map_private_extent_buffer(eb, offset,
					sizeof(struct btrfs_disk_key),
					&map_token, &kaddr,
					&map_start, &map_len, KM_USER0);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			if (map_token)
				unmap_extent_buffer(eb, map_token, KM_USER0);
			return 0;
		}
	}
	*slot = low;
	if (map_token)
		unmap_extent_buffer(eb, map_token, KM_USER0);
	return 1;
}
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
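/*
 * Illustrative usage sketch: callers use btrfs_bin_search() on a locked
 * buffer to find where a key lives or where it would be inserted.  A return
 * of 0 means the key was found at *slot; 1 means it was not found and *slot
 * is the insertion point (possibly equal to btrfs_header_nritems(eb)).
 *
 *	int slot;
 *	int ret = btrfs_bin_search(eb, &key, btrfs_header_level(eb), &slot);
 *	if (ret == 0)
 *		the key is at 'slot'
 *	else
 *		the key would belong at 'slot'
 */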
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int orig_slot = path->slots[level];
	int err_on_enospc = 0;
	mid = path->nodes[level];
	WARN_ON(!path->locks[level]);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);
	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
		struct extent_buffer *child;
		if (btrfs_header_nritems(mid) != 1)
		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		spin_lock(&root->node_lock);
		spin_unlock(&root->node_lock);
		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);
		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);
		ret = btrfs_free_extent(trans, root, mid->start, mid->len,
					0, root->root_key.objectid, level, 1);
		/* once for the root ptr */
		free_extent_buffer(mid);
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
	if (btrfs_header_nritems(mid) > 2)
	if (btrfs_header_nritems(mid) < 2)
	left = read_node_slot(root, parent, pslot - 1);
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
	right = read_node_slot(root, parent, pslot + 1);
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
	/* first, try to make some room in the middle buffer */
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (btrfs_header_nritems(mid) < 2)
	 * then try to empty the right most buffer into the middle
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
		if (btrfs_header_nritems(right) == 0) {
			u64 bytenr = right->start;
			u32 blocksize = right->len;
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			free_extent_buffer(right);
			wret = del_ptr(trans, root, path, level + 1, pslot +
			wret = btrfs_free_extent(trans, root, bytenr,
						 root->root_key.objectid,
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
	if (btrfs_header_nritems(mid) == 1) {
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		wret = balance_node_right(trans, root, mid, left);
		wret = push_node_left(trans, root, left, mid, 1);
	if (btrfs_header_nritems(mid) == 0) {
		/* we've managed to empty the middle node, drop it */
		u64 bytenr = mid->start;
		u32 blocksize = mid->len;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		free_extent_buffer(mid);
		wret = del_ptr(trans, root, path, level + 1, pslot);
		wret = btrfs_free_extent(trans, root, bytenr, blocksize,
					 0, root->root_key.objectid,
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	/* update the path */
	if (btrfs_header_nritems(left) > orig_slot) {
		extent_buffer_get(left);
		/* left was locked after cow */
		path->nodes[level] = left;
		path->slots[level + 1] -= 1;
		path->slots[level] = orig_slot;
		btrfs_tree_unlock(mid);
		free_extent_buffer(mid);
		orig_slot -= btrfs_header_nritems(left);
		path->slots[level] = orig_slot;
	/* double check we haven't messed things up */
	check_block(root, path, level);
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	if (path->nodes[level] != left)
		btrfs_tree_unlock(left);
	free_extent_buffer(left);
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int orig_slot = path->slots[level];
	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);
	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];
	left = read_node_slot(root, parent, pslot - 1);
	/* first, try to make some room in the middle buffer */
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			ret = btrfs_cow_block(trans, root, left, parent,
				wret = push_node_left(trans, root,
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	right = read_node_slot(root, parent, pslot + 1);
	 * then try to empty the right most buffer into the middle
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			ret = btrfs_cow_block(trans, root, right,
				wret = balance_node_right(trans, root,
			struct btrfs_disk_key disk_key;
			btrfs_node_key(right, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	int direction = path->reada;
	struct extent_buffer *eb;
	if (!path->nodes[level])
	node = path->nodes[level];
	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
		free_extent_buffer(eb);
	nritems = btrfs_header_nritems(node);
		if (direction < 0) {
		} else if (direction > 0) {
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			readahead_tree_block(root, search, blocksize,
				     btrfs_node_ptr_generation(node, nr));
		if ((nread > 65536 || nscan > 32))
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	parent = path->nodes[level + 1];
	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
		free_extent_buffer(eb);
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
		free_extent_buffer(eb);
	if (block1 || block2) {
		/* release the whole path */
		btrfs_release_path(root, path);
		/* read the blocks */
			readahead_tree_block(root, block1, blocksize, 0);
			readahead_tree_block(root, block2, blocksize, 0);
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
static noinline void unlock_up(struct btrfs_path *path, int level,
	int skip_level = level;
	struct extent_buffer *t;
	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
		if (!path->locks[i])
		if (!no_skips && path->slots[i] == 0) {
		if (!no_skips && path->keep_locks) {
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
		if (skip_level < i && i >= lowest_unlock)
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock(t);
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock(path->nodes[i]);
		path->locks[i] = 0;
	}
}
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key)
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);
	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
		 * we found an up to date block without sleeping, return
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);
	free_extent_buffer(tmp);
		reada_for_search(root, p, level, slot, key->objectid);
	btrfs_release_path(NULL, p);
	tmp = read_tree_block(root, blocknr, blocksize, gen);
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		if (!btrfs_buffer_uptodate(tmp, 0))
		free_extent_buffer(tmp);
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len)
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		sret = reada_for_balance(root, p, level);
		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL);
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		sret = reada_for_balance(root, p, level);
		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL);
		b = p->nodes[level];
			btrfs_release_path(NULL, p);
		BUG_ON(btrfs_header_nritems(b) == 1);
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
	struct extent_buffer *b;
	int lowest_unlock = 1;
	u8 lowest_level = 0;
	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);
	if (p->search_commit_root) {
		b = root->commit_root;
		extent_buffer_get(b);
		if (!p->skip_locking)
		if (p->skip_locking)
			b = btrfs_root_node(root);
			b = btrfs_lock_root_node(root);
	level = btrfs_header_level(b);
	 * setup the path here so we can release it under lock
	 * contention with the cow code
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = 1;
		 * if we don't really need to cow this block
		 * then we don't want to set the path blocking,
		 * so we test it here
		if (!should_cow_block(trans, root, b))
		btrfs_set_path_blocking(p);
		err = btrfs_cow_block(trans, root, b,
				      p->nodes[level + 1],
				      p->slots[level + 1], &b);
			free_extent_buffer(b);
	BUG_ON(!cow && ins_len);
	if (level != btrfs_header_level(b))
	level = btrfs_header_level(b);
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = 1;
	btrfs_clear_path_blocking(p, NULL);
	 * we have a lock on b and as long as we aren't changing
	 * the tree, there is no way for the items in b to change.
	 * It is safe to drop the lock on our parent before we
	 * go through the expensive btree search on b.
	 * If cow is true, then we might be changing slot zero,
	 * which may require changing the parent.  So, we can't
	 * drop the lock until after we know which slot we're
	btrfs_unlock_up_safe(p, level + 1);
	ret = check_block(root, p, level);
	ret = bin_search(b, key, level, &slot);
		if (ret && slot > 0) {
		p->slots[level] = slot;
		err = setup_nodes_for_search(trans, root, p, b, level,
		b = p->nodes[level];
		slot = p->slots[level];
		unlock_up(p, level, lowest_unlock);
		if (level == lowest_level) {
		err = read_block_for_search(trans, root, p,
					    &b, level, slot, key);
		if (!p->skip_locking) {
			btrfs_clear_path_blocking(p, NULL);
			err = btrfs_try_spin_lock(b);
				btrfs_set_path_blocking(p);
				btrfs_clear_path_blocking(p, b);
		p->slots[level] = slot;
		    btrfs_leaf_free_space(root, b) < ins_len) {
			btrfs_set_path_blocking(p);
			err = split_leaf(trans, root, key,
					 p, ins_len, ret == 0);
			btrfs_clear_path_blocking(p, NULL);
		if (!p->search_for_split)
			unlock_up(p, level, lowest_unlock);
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
		btrfs_release_path(root, p);
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_path *path,
			  struct btrfs_disk_key *key, int level)
{
	int i;
	int ret = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
	return ret;
}
/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (comp_keys(&disk_key, new_key) >= 0)
			return -1;
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (comp_keys(&disk_key, new_key) <= 0)
			return -1;
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
	return 0;
}
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
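/*
 * Worked example (the counts are made up for illustration): suppose
 * BTRFS_NODEPTRS_PER_BLOCK(root) is 121, dst currently holds 100 pointers
 * and src holds 30.  push_items starts as 121 - 100 = 21; in the non-empty
 * case it is then capped at min(src_nritems - 8, 21) = 21, so 21 pointers
 * move, leaving src with 9 entries and filling dst to 121.
 */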
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	spin_lock(&root->node_lock);
	old = root->node;
	root->node = c;
	spin_unlock(&root->node_lock);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = 1;
	path->slots[level] = 0;
	return 0;
}
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, struct btrfs_disk_key
		      *key, u64 bytenr, int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;

	BUG_ON(!path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
		BUG();
	if (slot != nritems) {
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
	return 0;
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	int wret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	wret = insert_ptr(trans, root, path, &disk_key, split->start,
			  path->slots[level + 1] + 1,
			  level + 1);
	if (wret)
		ret = wret;

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
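/*
 * Worked example (the counts are made up for illustration): splitting a node
 * with c_nritems == 121 gives mid = (121 + 1) / 2 = 61, so the 60 pointers in
 * slots 61..120 are copied into the new 'split' buffer, the original keeps
 * 61, and the key that was at slot 61 is inserted into the parent for the new
 * block.  A path that pointed at slot 80 is adjusted to slot 80 - 61 = 19 in
 * 'split'.
 */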
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
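/*
 * Worked example (the item sizes are made up for illustration): on a 4K leaf
 * where BTRFS_LEAF_DATA_SIZE(root) is 3995 bytes, a leaf holding two items
 * with 100 and 200 bytes of data uses 300 bytes of data plus
 * 2 * sizeof(struct btrfs_item) == 50 bytes of item headers, so
 * btrfs_leaf_free_space() returns 3995 - 350 = 3645 bytes.
 */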
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems)
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_disk_key disk_key;
	struct btrfs_item *item;
	if (path->slots[0] >= left_nritems)
		push_space += data_size;
	slot = path->slots[1];
	i = left_nritems - 1;
		item = btrfs_item_nr(left, i);
		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
		if (path->slots[0] == i)
			push_space += data_size;
		if (!left->map_token) {
			map_extent_buffer(left, (unsigned long)item,
					sizeof(struct btrfs_item),
					&left->map_token, &left->kaddr,
					&left->map_start, &left->map_len,
		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
		push_space += this_item_size + sizeof(*item);
	if (left->map_token) {
		unmap_extent_buffer(left, left->map_token, KM_USER1);
		left->map_token = NULL;
	if (push_items == 0)
	if (!empty && push_items == left_nritems)
	/* push left to right */
	right_nritems = btrfs_header_nritems(right);
	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);
	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);
	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));
	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));
	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		if (!right->map_token) {
			map_extent_buffer(right, (unsigned long)item,
					sizeof(struct btrfs_item),
					&right->map_token, &right->kaddr,
					&right->map_start, &right->map_len,
		push_space -= btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	if (right->map_token) {
		unmap_extent_buffer(right, right->map_token, KM_USER1);
		right->map_token = NULL;
	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);
		btrfs_mark_buffer_dirty(left);
	btrfs_mark_buffer_dirty(right);
	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);
	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
2458 * push some data in the path leaf to the right, trying to free up at
2459 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2461 * returns 1 if the push failed because the other node didn't have enough
2462 * room, 0 if everything worked out and < 0 if there were major errors.
2464 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2465 *root, struct btrfs_path *path, int data_size,
2468 struct extent_buffer *left = path->nodes[0];
2469 struct extent_buffer *right;
2470 struct extent_buffer *upper;
2476 if (!path->nodes[1])
2479 slot = path->slots[1];
2480 upper = path->nodes[1];
2481 if (slot >= btrfs_header_nritems(upper) - 1)
2484 btrfs_assert_tree_locked(path->nodes[1]);
2486 right = read_node_slot(root, upper, slot + 1);
2487 btrfs_tree_lock(right);
2488 btrfs_set_lock_blocking(right);
2490 free_space = btrfs_leaf_free_space(root, right);
2491 if (free_space < data_size)
2494 /* cow and double check */
2495 ret = btrfs_cow_block(trans, root, right, upper,
2500 free_space = btrfs_leaf_free_space(root, right);
2501 if (free_space < data_size)
2504 left_nritems = btrfs_header_nritems(left);
2505 if (left_nritems == 0)
2508 return __push_leaf_right(trans, root, path, data_size, empty,
2509 right, free_space, left_nritems);
2511 btrfs_tree_unlock(right);
2512 free_extent_buffer(right);
2517 * push some data in the path leaf to the left, trying to free up at
2518 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2520 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2521 struct btrfs_root *root,
2522 struct btrfs_path *path, int data_size,
2523 int empty, struct extent_buffer *left,
2524 int free_space, int right_nritems)
2526 struct btrfs_disk_key disk_key;
2527 struct extent_buffer *right = path->nodes[0];
2532 struct btrfs_item *item;
2533 u32 old_left_nritems;
2538 u32 old_left_item_size;
2540 slot = path->slots[1];
2545 nr = right_nritems - 1;
2547 for (i = 0; i < nr; i++) {
2548 item = btrfs_item_nr(right, i);
2549 if (!right->map_token) {
2550 map_extent_buffer(right, (unsigned long)item,
2551 sizeof(struct btrfs_item),
2552 &right->map_token, &right->kaddr,
2553 &right->map_start, &right->map_len,
2557 if (!empty && push_items > 0) {
2558 if (path->slots[0] < i)
2560 if (path->slots[0] == i) {
2561 int space = btrfs_leaf_free_space(root, right);
2562 if (space + push_space * 2 > free_space)
2567 if (path->slots[0] == i)
2568 push_space += data_size;
2570 this_item_size = btrfs_item_size(right, item);
2571 if (this_item_size + sizeof(*item) + push_space > free_space)
2575 push_space += this_item_size + sizeof(*item);
2578 if (right->map_token) {
2579 unmap_extent_buffer(right, right->map_token, KM_USER1);
2580 right->map_token = NULL;
2583 if (push_items == 0) {
2587 if (!empty && push_items == btrfs_header_nritems(right))
2590 /* push data from right to left */
2591 copy_extent_buffer(left, right,
2592 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2593 btrfs_item_nr_offset(0),
2594 push_items * sizeof(struct btrfs_item));
2596 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2597 btrfs_item_offset_nr(right, push_items - 1);
2599 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2600 leaf_data_end(root, left) - push_space,
2601 btrfs_leaf_data(right) +
2602 btrfs_item_offset_nr(right, push_items - 1),
2604 old_left_nritems = btrfs_header_nritems(left);
2605 BUG_ON(old_left_nritems <= 0);
2607 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2608 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2611 item = btrfs_item_nr(left, i);
2612 if (!left->map_token) {
2613 map_extent_buffer(left, (unsigned long)item,
2614 sizeof(struct btrfs_item),
2615 &left->map_token, &left->kaddr,
2616 &left->map_start, &left->map_len,
2620 ioff = btrfs_item_offset(left, item);
2621 btrfs_set_item_offset(left, item,
2622 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2624 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2625 if (left->map_token) {
2626 unmap_extent_buffer(left, left->map_token, KM_USER1);
2627 left->map_token = NULL;
2630 /* fixup right node */
2631 if (push_items > right_nritems) {
2632 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2637 if (push_items < right_nritems) {
2638 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2639 leaf_data_end(root, right);
2640 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2641 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2642 btrfs_leaf_data(right) +
2643 leaf_data_end(root, right), push_space);
2645 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2646 btrfs_item_nr_offset(push_items),
2647 (btrfs_header_nritems(right) - push_items) *
2648 sizeof(struct btrfs_item));
2650 right_nritems -= push_items;
2651 btrfs_set_header_nritems(right, right_nritems);
2652 push_space = BTRFS_LEAF_DATA_SIZE(root);
2653 for (i = 0; i < right_nritems; i++) {
2654 item = btrfs_item_nr(right, i);
2656 if (!right->map_token) {
2657 map_extent_buffer(right, (unsigned long)item,
2658 sizeof(struct btrfs_item),
2659 &right->map_token, &right->kaddr,
2660 &right->map_start, &right->map_len,
2664 push_space = push_space - btrfs_item_size(right, item);
2665 btrfs_set_item_offset(right, item, push_space);
2667 if (right->map_token) {
2668 unmap_extent_buffer(right, right->map_token, KM_USER1);
2669 right->map_token = NULL;
2672 btrfs_mark_buffer_dirty(left);
2674 btrfs_mark_buffer_dirty(right);
2676 btrfs_item_key(right, &disk_key, 0);
2677 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2681 /* then fixup the leaf pointer in the path */
2682 if (path->slots[0] < push_items) {
2683 path->slots[0] += old_left_nritems;
2684 if (btrfs_header_nritems(path->nodes[0]) == 0)
2685 clean_tree_block(trans, root, path->nodes[0]);
2686 btrfs_tree_unlock(path->nodes[0]);
2687 free_extent_buffer(path->nodes[0]);
2688 path->nodes[0] = left;
2689 path->slots[1] -= 1;
2691 btrfs_tree_unlock(left);
2692 free_extent_buffer(left);
2693 path->slots[0] -= push_items;
2695 BUG_ON(path->slots[0] < 0);
2698 btrfs_tree_unlock(left);
2699 free_extent_buffer(left);
2704 * push some data in the path leaf to the left, trying to free up at
2705 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2707 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2708 *root, struct btrfs_path *path, int data_size,
2711 struct extent_buffer *right = path->nodes[0];
2712 struct extent_buffer *left;
2718 slot = path->slots[1];
2721 if (!path->nodes[1])
2724 right_nritems = btrfs_header_nritems(right);
2725 if (right_nritems == 0)
2728 btrfs_assert_tree_locked(path->nodes[1]);
2730 left = read_node_slot(root, path->nodes[1], slot - 1);
2731 btrfs_tree_lock(left);
2732 btrfs_set_lock_blocking(left);
2734 free_space = btrfs_leaf_free_space(root, left);
2735 if (free_space < data_size) {
2740 /* cow and double check */
2741 ret = btrfs_cow_block(trans, root, left,
2742 path->nodes[1], slot - 1, &left);
2744 /* we hit -ENOSPC, but it isn't fatal here */
2749 free_space = btrfs_leaf_free_space(root, left);
2750 if (free_space < data_size) {
2755 return __push_leaf_left(trans, root, path, data_size,
2756 empty, left, free_space, right_nritems);
2758 btrfs_tree_unlock(left);
2759 free_extent_buffer(left);
2764 * split the path's leaf in two, making sure there is at least data_size
2765 * available for the resulting leaf level of the path.
2767 * returns 0 if all went well and < 0 on failure.
2769 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2770 struct btrfs_root *root,
2771 struct btrfs_path *path,
2772 struct extent_buffer *l,
2773 struct extent_buffer *right,
2774 int slot, int mid, int nritems)
2781 struct btrfs_disk_key disk_key;
2783 nritems = nritems - mid;
2784 btrfs_set_header_nritems(right, nritems);
2785 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2787 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2788 btrfs_item_nr_offset(mid),
2789 nritems * sizeof(struct btrfs_item));
2791 copy_extent_buffer(right, l,
2792 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2793 data_copy_size, btrfs_leaf_data(l) +
2794 leaf_data_end(root, l), data_copy_size);
2796 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2797 btrfs_item_end_nr(l, mid);
2799 for (i = 0; i < nritems; i++) {
2800 struct btrfs_item *item = btrfs_item_nr(right, i);
2803 if (!right->map_token) {
2804 map_extent_buffer(right, (unsigned long)item,
2805 sizeof(struct btrfs_item),
2806 &right->map_token, &right->kaddr,
2807 &right->map_start, &right->map_len,
2811 ioff = btrfs_item_offset(right, item);
2812 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2815 if (right->map_token) {
2816 unmap_extent_buffer(right, right->map_token, KM_USER1);
2817 right->map_token = NULL;
2820 btrfs_set_header_nritems(l, mid);
2822 btrfs_item_key(right, &disk_key, 0);
2823 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2824 path->slots[1] + 1, 1);
2828 btrfs_mark_buffer_dirty(right);
2829 btrfs_mark_buffer_dirty(l);
2830 BUG_ON(path->slots[0] != slot);
2833 btrfs_tree_unlock(path->nodes[0]);
2834 free_extent_buffer(path->nodes[0]);
2835 path->nodes[0] = right;
2836 path->slots[0] -= mid;
2837 path->slots[1] += 1;
2839 btrfs_tree_unlock(right);
2840 free_extent_buffer(right);
2843 BUG_ON(path->slots[0] < 0);
2849 * split the path's leaf in two, making sure there is at least data_size
2850 * available for the resulting leaf level of the path.
2852 * returns 0 if all went well and < 0 on failure.
2854 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2855 struct btrfs_root *root,
2856 struct btrfs_key *ins_key,
2857 struct btrfs_path *path, int data_size,
2860 struct btrfs_disk_key disk_key;
2861 struct extent_buffer *l;
2865 struct extent_buffer *right;
2869 int num_doubles = 0;
2871 /* first try to make some room by pushing left and right */
2872 if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
2873 wret = push_leaf_right(trans, root, path, data_size, 0);
2877 wret = push_leaf_left(trans, root, path, data_size, 0);
2883 /* did the pushes work? */
2884 if (btrfs_leaf_free_space(root, l) >= data_size)
2888 if (!path->nodes[1]) {
2889 ret = insert_new_root(trans, root, path, 1);
2896 slot = path->slots[0];
2897 nritems = btrfs_header_nritems(l);
2898 mid = (nritems + 1) / 2;
2902 leaf_space_used(l, mid, nritems - mid) + data_size >
2903 BTRFS_LEAF_DATA_SIZE(root)) {
2904 if (slot >= nritems) {
2908 if (mid != nritems &&
2909 leaf_space_used(l, mid, nritems - mid) +
2910 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2916 if (leaf_space_used(l, 0, mid) + data_size >
2917 BTRFS_LEAF_DATA_SIZE(root)) {
2918 if (!extend && data_size && slot == 0) {
2920 } else if ((extend || !data_size) && slot == 0) {
2924 if (mid != nritems &&
2925 leaf_space_used(l, mid, nritems - mid) +
2926 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2934 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2936 btrfs_item_key(l, &disk_key, mid);
2938 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
2939 root->root_key.objectid,
2940 &disk_key, 0, l->start, 0);
2941 if (IS_ERR(right)) {
2943 return PTR_ERR(right);
2946 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2947 btrfs_set_header_bytenr(right, right->start);
2948 btrfs_set_header_generation(right, trans->transid);
2949 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
2950 btrfs_set_header_owner(right, root->root_key.objectid);
2951 btrfs_set_header_level(right, 0);
2952 write_extent_buffer(right, root->fs_info->fsid,
2953 (unsigned long)btrfs_header_fsid(right),
2956 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2957 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2962 btrfs_set_header_nritems(right, 0);
2963 wret = insert_ptr(trans, root, path,
2964 &disk_key, right->start,
2965 path->slots[1] + 1, 1);
2969 btrfs_tree_unlock(path->nodes[0]);
2970 free_extent_buffer(path->nodes[0]);
2971 path->nodes[0] = right;
2973 path->slots[1] += 1;
2975 btrfs_set_header_nritems(right, 0);
2976 wret = insert_ptr(trans, root, path,
2982 btrfs_tree_unlock(path->nodes[0]);
2983 free_extent_buffer(path->nodes[0]);
2984 path->nodes[0] = right;
2986 if (path->slots[1] == 0) {
2987 wret = fixup_low_keys(trans, root,
2988 path, &disk_key, 1);
2993 btrfs_mark_buffer_dirty(right);
2997 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3001 BUG_ON(num_doubles != 0);
3010 * This function splits a single item into two items,
3011 * giving 'new_key' to the new item and splitting the
3012 * old one at split_offset (from the start of the item).
3014 * The path may be released by this operation. After
3015 * the split, the path is pointing to the old item. The
3016 * new item is going to be in the same node as the old one.
3018 * Note, the item being split must be small enough to live alone on
3019 * a tree block with room for one extra struct btrfs_item
3021 * This allows us to split the item in place, keeping a lock on the
3022 * leaf the entire time.
3024 int btrfs_split_item(struct btrfs_trans_handle *trans,
3025 struct btrfs_root *root,
3026 struct btrfs_path *path,
3027 struct btrfs_key *new_key,
3028 unsigned long split_offset)
3031 struct extent_buffer *leaf;
3032 struct btrfs_key orig_key;
3033 struct btrfs_item *item;
3034 struct btrfs_item *new_item;
3039 struct btrfs_disk_key disk_key;
3042 leaf = path->nodes[0];
3043 btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
3044 if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
3047 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3048 btrfs_release_path(root, path);
3050 path->search_for_split = 1;
3051 path->keep_locks = 1;
3053 ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
3054 path->search_for_split = 0;
3056 /* if our item isn't there or got smaller, return now */
3057 if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
3059 path->keep_locks = 0;
3063 btrfs_set_path_blocking(path);
3064 ret = split_leaf(trans, root, &orig_key, path,
3065 sizeof(struct btrfs_item), 1);
3066 path->keep_locks = 0;
3069 btrfs_unlock_up_safe(path, 1);
3070 leaf = path->nodes[0];
3071 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3075 * make sure any changes to the path from split_leaf leave it
3076 * in a blocking state
3078 btrfs_set_path_blocking(path);
3080 item = btrfs_item_nr(leaf, path->slots[0]);
3081 orig_offset = btrfs_item_offset(leaf, item);
3082 item_size = btrfs_item_size(leaf, item);
3084 buf = kmalloc(item_size, GFP_NOFS);
3085 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3086 path->slots[0]), item_size);
3087 slot = path->slots[0] + 1;
3088 leaf = path->nodes[0];
3090 nritems = btrfs_header_nritems(leaf);
3092 if (slot != nritems) {
3093 /* shift the items */
3094 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3095 btrfs_item_nr_offset(slot),
3096 (nritems - slot) * sizeof(struct btrfs_item));
3100 btrfs_cpu_key_to_disk(&disk_key, new_key);
3101 btrfs_set_item_key(leaf, &disk_key, slot);
3103 new_item = btrfs_item_nr(leaf, slot);
3105 btrfs_set_item_offset(leaf, new_item, orig_offset);
3106 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3108 btrfs_set_item_offset(leaf, item,
3109 orig_offset + item_size - split_offset);
3110 btrfs_set_item_size(leaf, item, split_offset);
3112 btrfs_set_header_nritems(leaf, nritems + 1);
3114 /* write the data for the start of the original item */
3115 write_extent_buffer(leaf, buf,
3116 btrfs_item_ptr_offset(leaf, path->slots[0]),
3119 /* write the data for the new item */
3120 write_extent_buffer(leaf, buf + split_offset,
3121 btrfs_item_ptr_offset(leaf, slot),
3122 item_size - split_offset);
3123 btrfs_mark_buffer_dirty(leaf);
3126 if (btrfs_leaf_free_space(root, leaf) < 0) {
3127 btrfs_print_leaf(root, leaf);
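/*
 * Editor's illustrative sketch, not part of the original file: one way a
 * caller could use btrfs_split_item().  The wrapper name and its
 * parameters (key naming the existing item, new_key for the second half,
 * split_offset for the cut point) are hypothetical; error handling is
 * kept minimal.
 */
static int example_split_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_key *key,
			      struct btrfs_key *new_key,
			      unsigned long split_offset)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* point the path at the existing item, cow so the leaf is writable */
	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (!ret)
		ret = btrfs_split_item(trans, root, path, new_key,
				       split_offset);

	btrfs_free_path(path);
	return ret;
}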
3135 * make the item pointed to by the path smaller. new_size indicates
3136 * how small to make it, and from_end tells us if we just chop bytes
3137 * off the end of the item or if we shift the item to chop bytes off
3138 * the front.
3140 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3141 struct btrfs_root *root,
3142 struct btrfs_path *path,
3143 u32 new_size, int from_end)
3148 struct extent_buffer *leaf;
3149 struct btrfs_item *item;
3151 unsigned int data_end;
3152 unsigned int old_data_start;
3153 unsigned int old_size;
3154 unsigned int size_diff;
3157 slot_orig = path->slots[0];
3158 leaf = path->nodes[0];
3159 slot = path->slots[0];
3161 old_size = btrfs_item_size_nr(leaf, slot);
3162 if (old_size == new_size)
3165 nritems = btrfs_header_nritems(leaf);
3166 data_end = leaf_data_end(root, leaf);
3168 old_data_start = btrfs_item_offset_nr(leaf, slot);
3170 size_diff = old_size - new_size;
3173 BUG_ON(slot >= nritems);
3176 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3178 /* first correct the data pointers */
3179 for (i = slot; i < nritems; i++) {
3181 item = btrfs_item_nr(leaf, i);
3183 if (!leaf->map_token) {
3184 map_extent_buffer(leaf, (unsigned long)item,
3185 sizeof(struct btrfs_item),
3186 &leaf->map_token, &leaf->kaddr,
3187 &leaf->map_start, &leaf->map_len,
3191 ioff = btrfs_item_offset(leaf, item);
3192 btrfs_set_item_offset(leaf, item, ioff + size_diff);
3195 if (leaf->map_token) {
3196 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3197 leaf->map_token = NULL;
3200 /* shift the data */
3202 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3203 data_end + size_diff, btrfs_leaf_data(leaf) +
3204 data_end, old_data_start + new_size - data_end);
3206 struct btrfs_disk_key disk_key;
3209 btrfs_item_key(leaf, &disk_key, slot);
3211 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3213 struct btrfs_file_extent_item *fi;
3215 fi = btrfs_item_ptr(leaf, slot,
3216 struct btrfs_file_extent_item);
3217 fi = (struct btrfs_file_extent_item *)(
3218 (unsigned long)fi - size_diff);
3220 if (btrfs_file_extent_type(leaf, fi) ==
3221 BTRFS_FILE_EXTENT_INLINE) {
3222 ptr = btrfs_item_ptr_offset(leaf, slot);
3223 memmove_extent_buffer(leaf, ptr,
3225 offsetof(struct btrfs_file_extent_item,
3230 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3231 data_end + size_diff, btrfs_leaf_data(leaf) +
3232 data_end, old_data_start - data_end);
3234 offset = btrfs_disk_key_offset(&disk_key);
3235 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3236 btrfs_set_item_key(leaf, &disk_key, slot);
3238 fixup_low_keys(trans, root, path, &disk_key, 1);
3241 item = btrfs_item_nr(leaf, slot);
3242 btrfs_set_item_size(leaf, item, new_size);
3243 btrfs_mark_buffer_dirty(leaf);
3246 if (btrfs_leaf_free_space(root, leaf) < 0) {
3247 btrfs_print_leaf(root, leaf);
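/*
 * Editor's illustrative sketch, not part of the original file: shrinking
 * an existing item with btrfs_truncate_item(), chopping bytes off the
 * end (from_end == 1).  The wrapper name and the key/new_size parameters
 * are hypothetical.
 */
static int example_truncate_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_key *key, u32 new_size)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	/* only shrink; growing an item is btrfs_extend_item()'s job */
	if (!ret &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) > new_size)
		ret = btrfs_truncate_item(trans, root, path, new_size, 1);

	btrfs_free_path(path);
	return ret;
}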
3254 * make the item pointed to by the path bigger, data_size is the new size.
3256 int btrfs_extend_item(struct btrfs_trans_handle *trans,
3257 struct btrfs_root *root, struct btrfs_path *path,
3263 struct extent_buffer *leaf;
3264 struct btrfs_item *item;
3266 unsigned int data_end;
3267 unsigned int old_data;
3268 unsigned int old_size;
3271 slot_orig = path->slots[0];
3272 leaf = path->nodes[0];
3274 nritems = btrfs_header_nritems(leaf);
3275 data_end = leaf_data_end(root, leaf);
3277 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3278 btrfs_print_leaf(root, leaf);
3281 slot = path->slots[0];
3282 old_data = btrfs_item_end_nr(leaf, slot);
3285 if (slot >= nritems) {
3286 btrfs_print_leaf(root, leaf);
3287 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3293 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3295 /* first correct the data pointers */
3296 for (i = slot; i < nritems; i++) {
3298 item = btrfs_item_nr(leaf, i);
3300 if (!leaf->map_token) {
3301 map_extent_buffer(leaf, (unsigned long)item,
3302 sizeof(struct btrfs_item),
3303 &leaf->map_token, &leaf->kaddr,
3304 &leaf->map_start, &leaf->map_len,
3307 ioff = btrfs_item_offset(leaf, item);
3308 btrfs_set_item_offset(leaf, item, ioff - data_size);
3311 if (leaf->map_token) {
3312 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3313 leaf->map_token = NULL;
3316 /* shift the data */
3317 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3318 data_end - data_size, btrfs_leaf_data(leaf) +
3319 data_end, old_data - data_end);
3321 data_end = old_data;
3322 old_size = btrfs_item_size_nr(leaf, slot);
3323 item = btrfs_item_nr(leaf, slot);
3324 btrfs_set_item_size(leaf, item, old_size + data_size);
3325 btrfs_mark_buffer_dirty(leaf);
3328 if (btrfs_leaf_free_space(root, leaf) < 0) {
3329 btrfs_print_leaf(root, leaf);
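/*
 * Editor's illustrative sketch, not part of the original file: growing an
 * existing item with btrfs_extend_item() and writing the new bytes at its
 * tail.  Passing data_size as the ins_len to btrfs_search_slot() makes
 * the search split leaves as needed, so the room is guaranteed before the
 * extend.  The wrapper name and its parameters are hypothetical.
 */
static int example_extend_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *key,
			       void *extra, u32 data_size)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	u32 old_size;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, data_size, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto out;

	leaf = path->nodes[0];
	old_size = btrfs_item_size_nr(leaf, path->slots[0]);

	ret = btrfs_extend_item(trans, root, path, data_size);
	if (!ret) {
		/* the added space sits past the original end of the item */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]) + old_size;
		write_extent_buffer(leaf, extra, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
out:
	btrfs_free_path(path);
	return ret;
}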
3336 * Given a key and some data, insert items into the tree.
3337 * This does all the path init required, making room in the tree if needed.
3338 * Returns the number of keys that were inserted.
3340 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3341 struct btrfs_root *root,
3342 struct btrfs_path *path,
3343 struct btrfs_key *cpu_key, u32 *data_size,
3346 struct extent_buffer *leaf;
3347 struct btrfs_item *item;
3354 unsigned int data_end;
3355 struct btrfs_disk_key disk_key;
3356 struct btrfs_key found_key;
3358 for (i = 0; i < nr; i++) {
3359 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3360 BTRFS_LEAF_DATA_SIZE(root)) {
3364 total_data += data_size[i];
3365 total_size += data_size[i] + sizeof(struct btrfs_item);
3369 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3375 leaf = path->nodes[0];
3377 nritems = btrfs_header_nritems(leaf);
3378 data_end = leaf_data_end(root, leaf);
3380 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3381 for (i = nr; i >= 0; i--) {
3382 total_data -= data_size[i];
3383 total_size -= data_size[i] + sizeof(struct btrfs_item);
3384 if (total_size < btrfs_leaf_free_space(root, leaf))
3390 slot = path->slots[0];
3393 if (slot != nritems) {
3394 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3396 item = btrfs_item_nr(leaf, slot);
3397 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3399 /* figure out how many keys we can insert in here */
3400 total_data = data_size[0];
3401 for (i = 1; i < nr; i++) {
3402 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3404 total_data += data_size[i];
3408 if (old_data < data_end) {
3409 btrfs_print_leaf(root, leaf);
3410 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3411 slot, old_data, data_end);
3415 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3417 /* first correct the data pointers */
3418 WARN_ON(leaf->map_token);
3419 for (i = slot; i < nritems; i++) {
3422 item = btrfs_item_nr(leaf, i);
3423 if (!leaf->map_token) {
3424 map_extent_buffer(leaf, (unsigned long)item,
3425 sizeof(struct btrfs_item),
3426 &leaf->map_token, &leaf->kaddr,
3427 &leaf->map_start, &leaf->map_len,
3431 ioff = btrfs_item_offset(leaf, item);
3432 btrfs_set_item_offset(leaf, item, ioff - total_data);
3434 if (leaf->map_token) {
3435 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3436 leaf->map_token = NULL;
3439 /* shift the items */
3440 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3441 btrfs_item_nr_offset(slot),
3442 (nritems - slot) * sizeof(struct btrfs_item));
3444 /* shift the data */
3445 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3446 data_end - total_data, btrfs_leaf_data(leaf) +
3447 data_end, old_data - data_end);
3448 data_end = old_data;
3451 * this sucks but it has to be done: if we are inserting at
3452 * the end of the leaf, only insert 1 of the items, since we
3453 * have no way of knowing what's on the next leaf and we'd have
3454 * to drop our current locks to figure it out
3459 /* setup the item for the new data */
3460 for (i = 0; i < nr; i++) {
3461 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3462 btrfs_set_item_key(leaf, &disk_key, slot + i);
3463 item = btrfs_item_nr(leaf, slot + i);
3464 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3465 data_end -= data_size[i];
3466 btrfs_set_item_size(leaf, item, data_size[i]);
3468 btrfs_set_header_nritems(leaf, nritems + nr);
3469 btrfs_mark_buffer_dirty(leaf);
3473 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3474 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3477 if (btrfs_leaf_free_space(root, leaf) < 0) {
3478 btrfs_print_leaf(root, leaf);
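/*
 * Editor's illustrative sketch, not part of the original file: a thin
 * wrapper around btrfs_insert_some_items().  Per the header comment
 * above, the return value is the number of keys that were actually
 * inserted; a real caller would then write each inserted item's payload
 * through the path before releasing it, a step omitted here for brevity.
 * The wrapper name and its parameters are hypothetical.
 */
static int example_insert_some(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *keys, u32 *sizes, int nr)
{
	struct btrfs_path *path;
	int inserted;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	inserted = btrfs_insert_some_items(trans, root, path, keys, sizes, nr);

	btrfs_free_path(path);
	return inserted;
}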
3488 * this is a helper for btrfs_insert_empty_items; the main goal here is
3489 * to save stack depth by doing the bulk of the work in a function
3490 * that doesn't call btrfs_search_slot
3492 static noinline_for_stack int
3493 setup_items_for_insert(struct btrfs_trans_handle *trans,
3494 struct btrfs_root *root, struct btrfs_path *path,
3495 struct btrfs_key *cpu_key, u32 *data_size,
3496 u32 total_data, u32 total_size, int nr)
3498 struct btrfs_item *item;
3501 unsigned int data_end;
3502 struct btrfs_disk_key disk_key;
3504 struct extent_buffer *leaf;
3507 leaf = path->nodes[0];
3508 slot = path->slots[0];
3510 nritems = btrfs_header_nritems(leaf);
3511 data_end = leaf_data_end(root, leaf);
3513 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3514 btrfs_print_leaf(root, leaf);
3515 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3516 total_size, btrfs_leaf_free_space(root, leaf));
3520 if (slot != nritems) {
3521 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3523 if (old_data < data_end) {
3524 btrfs_print_leaf(root, leaf);
3525 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3526 slot, old_data, data_end);
3530 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3532 /* first correct the data pointers */
3533 WARN_ON(leaf->map_token);
3534 for (i = slot; i < nritems; i++) {
3537 item = btrfs_item_nr(leaf, i);
3538 if (!leaf->map_token) {
3539 map_extent_buffer(leaf, (unsigned long)item,
3540 sizeof(struct btrfs_item),
3541 &leaf->map_token, &leaf->kaddr,
3542 &leaf->map_start, &leaf->map_len,
3546 ioff = btrfs_item_offset(leaf, item);
3547 btrfs_set_item_offset(leaf, item, ioff - total_data);
3549 if (leaf->map_token) {
3550 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3551 leaf->map_token = NULL;
3554 /* shift the items */
3555 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3556 btrfs_item_nr_offset(slot),
3557 (nritems - slot) * sizeof(struct btrfs_item));
3559 /* shift the data */
3560 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3561 data_end - total_data, btrfs_leaf_data(leaf) +
3562 data_end, old_data - data_end);
3563 data_end = old_data;
3566 /* setup the item for the new data */
3567 for (i = 0; i < nr; i++) {
3568 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3569 btrfs_set_item_key(leaf, &disk_key, slot + i);
3570 item = btrfs_item_nr(leaf, slot + i);
3571 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3572 data_end -= data_size[i];
3573 btrfs_set_item_size(leaf, item, data_size[i]);
3576 btrfs_set_header_nritems(leaf, nritems + nr);
3580 struct btrfs_disk_key disk_key;
3581 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3582 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3584 btrfs_unlock_up_safe(path, 1);
3585 btrfs_mark_buffer_dirty(leaf);
3587 if (btrfs_leaf_free_space(root, leaf) < 0) {
3588 btrfs_print_leaf(root, leaf);
3595 * Given a key and some data, insert items into the tree.
3596 * This does all the path init required, making room in the tree if needed.
3598 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3599 struct btrfs_root *root,
3600 struct btrfs_path *path,
3601 struct btrfs_key *cpu_key, u32 *data_size,
3604 struct extent_buffer *leaf;
3611 for (i = 0; i < nr; i++)
3612 total_data += data_size[i];
3614 total_size = total_data + (nr * sizeof(struct btrfs_item));
3615 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3621 leaf = path->nodes[0];
3622 slot = path->slots[0];
3625 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
3626 total_data, total_size, nr);
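/*
 * Editor's illustrative sketch, not part of the original file: reserving
 * two adjacent items in a single search with btrfs_insert_empty_items()
 * and then filling in both payloads.  The keys must be in sorted order.
 * Compare with btrfs_insert_item() below, which does the same thing for
 * one item.  The wrapper name and its parameters are hypothetical.
 */
static int example_insert_two(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct btrfs_key *keys,	/* array of 2 keys */
			      void *data[2], u32 sizes[2])
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (!ret) {
		leaf = path->nodes[0];
		/* path->slots[0] points at the first of the new items */
		for (i = 0; i < 2; i++) {
			ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + i);
			write_extent_buffer(leaf, data[i], ptr, sizes[i]);
		}
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}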
3633 * Given a key and some data, insert an item into the tree.
3634 * This does all the path init required, making room in the tree if needed.
3636 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3637 *root, struct btrfs_key *cpu_key, void *data, u32
3641 struct btrfs_path *path;
3642 struct extent_buffer *leaf;
3645 path = btrfs_alloc_path();
3647 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3649 leaf = path->nodes[0];
3650 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3651 write_extent_buffer(leaf, data, ptr, data_size);
3652 btrfs_mark_buffer_dirty(leaf);
3654 btrfs_free_path(path);
3659 * delete the pointer from a given node.
3661 * the tree should have been previously balanced so the deletion does not
3662 * empty a node.
3664 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3665 struct btrfs_path *path, int level, int slot)
3667 struct extent_buffer *parent = path->nodes[level];
3672 nritems = btrfs_header_nritems(parent);
3673 if (slot != nritems - 1) {
3674 memmove_extent_buffer(parent,
3675 btrfs_node_key_ptr_offset(slot),
3676 btrfs_node_key_ptr_offset(slot + 1),
3677 sizeof(struct btrfs_key_ptr) *
3678 (nritems - slot - 1));
3681 btrfs_set_header_nritems(parent, nritems);
3682 if (nritems == 0 && parent == root->node) {
3683 BUG_ON(btrfs_header_level(root->node) != 1);
3684 /* just turn the root into a leaf and break */
3685 btrfs_set_header_level(root->node, 0);
3686 } else if (slot == 0) {
3687 struct btrfs_disk_key disk_key;
3689 btrfs_node_key(parent, &disk_key, 0);
3690 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
3694 btrfs_mark_buffer_dirty(parent);
3699 * a helper function to delete the leaf pointed to by path->slots[1] and
3702 * This deletes the pointer in path->nodes[1] and frees the leaf
3703 * block extent. zero is returned if it all worked out, < 0 otherwise.
3705 * The path must have already been setup for deleting the leaf, including
3706 * all the proper balancing. path->nodes[1] must be locked.
3708 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3709 struct btrfs_root *root,
3710 struct btrfs_path *path,
3711 struct extent_buffer *leaf)
3715 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3716 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3721 * btrfs_free_extent is expensive, we want to make sure we
3722 * aren't holding any locks when we call it
3724 btrfs_unlock_up_safe(path, 0);
3726 ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
3727 0, root->root_key.objectid, 0, 0);
3731 * delete the item at the leaf level in path. If that empties
3732 * the leaf, remove it from the tree
3734 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3735 struct btrfs_path *path, int slot, int nr)
3737 struct extent_buffer *leaf;
3738 struct btrfs_item *item;
3746 leaf = path->nodes[0];
3747 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3749 for (i = 0; i < nr; i++)
3750 dsize += btrfs_item_size_nr(leaf, slot + i);
3752 nritems = btrfs_header_nritems(leaf);
3754 if (slot + nr != nritems) {
3755 int data_end = leaf_data_end(root, leaf);
3757 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3759 btrfs_leaf_data(leaf) + data_end,
3760 last_off - data_end);
3762 for (i = slot + nr; i < nritems; i++) {
3765 item = btrfs_item_nr(leaf, i);
3766 if (!leaf->map_token) {
3767 map_extent_buffer(leaf, (unsigned long)item,
3768 sizeof(struct btrfs_item),
3769 &leaf->map_token, &leaf->kaddr,
3770 &leaf->map_start, &leaf->map_len,
3773 ioff = btrfs_item_offset(leaf, item);
3774 btrfs_set_item_offset(leaf, item, ioff + dsize);
3777 if (leaf->map_token) {
3778 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3779 leaf->map_token = NULL;
3782 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3783 btrfs_item_nr_offset(slot + nr),
3784 sizeof(struct btrfs_item) *
3785 (nritems - slot - nr));
3787 btrfs_set_header_nritems(leaf, nritems - nr);
3790 /* delete the leaf if we've emptied it */
3792 if (leaf == root->node) {
3793 btrfs_set_header_level(leaf, 0);
3795 ret = btrfs_del_leaf(trans, root, path, leaf);
3799 int used = leaf_space_used(leaf, 0, nritems);
3801 struct btrfs_disk_key disk_key;
3803 btrfs_item_key(leaf, &disk_key, 0);
3804 wret = fixup_low_keys(trans, root, path,
3810 /* delete the leaf if it is mostly empty */
3811 if (used < BTRFS_LEAF_DATA_SIZE(root) / 2) {
3812 /* push_leaf_left fixes the path.
3813 * make sure the path still points to our leaf
3814 * for possible call to del_ptr below
3816 slot = path->slots[1];
3817 extent_buffer_get(leaf);
3819 btrfs_set_path_blocking(path);
3820 wret = push_leaf_left(trans, root, path, 1, 1);
3821 if (wret < 0 && wret != -ENOSPC)
3824 if (path->nodes[0] == leaf &&
3825 btrfs_header_nritems(leaf)) {
3826 wret = push_leaf_right(trans, root, path, 1, 1);
3827 if (wret < 0 && wret != -ENOSPC)
3831 if (btrfs_header_nritems(leaf) == 0) {
3832 path->slots[1] = slot;
3833 ret = btrfs_del_leaf(trans, root, path, leaf);
3835 free_extent_buffer(leaf);
3837 /* if we're still in the path, make sure
3838 * we're dirty. Otherwise, one of the
3839 * push_leaf functions must have already
3840 * dirtied this buffer
3842 if (path->nodes[0] == leaf)
3843 btrfs_mark_buffer_dirty(leaf);
3844 free_extent_buffer(leaf);
3847 btrfs_mark_buffer_dirty(leaf);
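/*
 * Editor's illustrative sketch, not part of the original file: deleting a
 * single item named by a key.  The ins_len of -1 in the search tells
 * btrfs_search_slot() the caller intends to delete, so nodes are balanced
 * on the way down.  The wrapper name and key parameter are hypothetical.
 */
static int example_delete_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;

	btrfs_free_path(path);
	return ret;
}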
3854 * search the tree again to find a leaf with lesser keys
3855 * returns 0 if it found something or 1 if there are no lesser leaves.
3856 * returns < 0 on io errors.
3858 * This may release the path, and so you may lose any locks held at the
3859 * time you call it.
3861 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3863 struct btrfs_key key;
3864 struct btrfs_disk_key found_key;
3867 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
3871 else if (key.type > 0)
3873 else if (key.objectid > 0)
3878 btrfs_release_path(root, path);
3879 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3882 btrfs_item_key(path->nodes[0], &found_key, 0);
3883 ret = comp_keys(&found_key, &key);
3890 * A helper function to walk down the tree starting at min_key, looking
3891 * for nodes or leaves that are either in cache or have a minimum
3892 * transaction id. This is used by the btree defrag code, and tree logging
3894 * This does not cow, but it does stuff the starting key it finds back
3895 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3896 * key and get a writable path.
3898 * This does lock as it descends, and path->keep_locks should be set
3899 * to 1 by the caller.
3901 * This honors path->lowest_level to prevent descent past a given level
3902 * of the tree.
3904 * min_trans indicates the oldest transaction that you are interested
3905 * in walking through. Any nodes or leaves older than min_trans are
3906 * skipped over (without reading them).
3908 * returns zero if something useful was found, < 0 on error and 1 if there
3909 * was nothing in the tree that matched the search criteria.
3911 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
3912 struct btrfs_key *max_key,
3913 struct btrfs_path *path, int cache_only,
3916 struct extent_buffer *cur;
3917 struct btrfs_key found_key;
3924 WARN_ON(!path->keep_locks);
3926 cur = btrfs_lock_root_node(root);
3927 level = btrfs_header_level(cur);
3928 WARN_ON(path->nodes[level]);
3929 path->nodes[level] = cur;
3930 path->locks[level] = 1;
3932 if (btrfs_header_generation(cur) < min_trans) {
3937 nritems = btrfs_header_nritems(cur);
3938 level = btrfs_header_level(cur);
3939 sret = bin_search(cur, min_key, level, &slot);
3941 /* at the lowest level, we're done, setup the path and exit */
3942 if (level == path->lowest_level) {
3943 if (slot >= nritems)
3946 path->slots[level] = slot;
3947 btrfs_item_key_to_cpu(cur, &found_key, slot);
3950 if (sret && slot > 0)
3953 * check this node pointer against the cache_only and
3954 * min_trans parameters. If it isn't in cache or is too
3955 * old, skip to the next one.
3957 while (slot < nritems) {
3960 struct extent_buffer *tmp;
3961 struct btrfs_disk_key disk_key;
3963 blockptr = btrfs_node_blockptr(cur, slot);
3964 gen = btrfs_node_ptr_generation(cur, slot);
3965 if (gen < min_trans) {
3973 btrfs_node_key(cur, &disk_key, slot);
3974 if (comp_keys(&disk_key, max_key) >= 0) {
3980 tmp = btrfs_find_tree_block(root, blockptr,
3981 btrfs_level_size(root, level - 1));
3983 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
3984 free_extent_buffer(tmp);
3988 free_extent_buffer(tmp);
3993 * we didn't find a candidate key in this node, walk forward
3994 * and find another one
3996 if (slot >= nritems) {
3997 path->slots[level] = slot;
3998 btrfs_set_path_blocking(path);
3999 sret = btrfs_find_next_key(root, path, min_key, level,
4000 cache_only, min_trans);
4002 btrfs_release_path(root, path);
4008 /* save our key for returning back */
4009 btrfs_node_key_to_cpu(cur, &found_key, slot);
4010 path->slots[level] = slot;
4011 if (level == path->lowest_level) {
4013 unlock_up(path, level, 1);
4016 btrfs_set_path_blocking(path);
4017 cur = read_node_slot(root, cur, slot);
4019 btrfs_tree_lock(cur);
4021 path->locks[level - 1] = 1;
4022 path->nodes[level - 1] = cur;
4023 unlock_up(path, level, 1);
4024 btrfs_clear_path_blocking(path, NULL);
4028 memcpy(min_key, &found_key, sizeof(found_key));
4029 btrfs_set_path_blocking(path);
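/*
 * Editor's illustrative sketch, not part of the original file: scanning a
 * tree for items newer than a given transid with btrfs_search_forward(),
 * in the style of the defrag and tree-log walkers.  keep_locks must be
 * set, and the path is released between iterations.  The function name,
 * the key bounds and the empty processing step are hypothetical.
 */
static int example_walk_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	max_key.objectid = (u64)-1;
	max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, &max_key, path,
					   0, min_trans);
		if (ret) {
			/* 1 means nothing newer was left, < 0 is an error */
			if (ret > 0)
				ret = 0;
			break;
		}

		/* ... process the item at path->slots[0] here ... */

		/* step just past the key that was stuffed into min_key */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
		btrfs_release_path(root, path);
	}
	btrfs_free_path(path);
	return ret;
}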
4034 * this is similar to btrfs_next_leaf, but does not try to preserve
4035 * and fixup the path. It looks for and returns the next key in the
4036 * tree based on the current path and the cache_only and min_trans
4037 * parameters.
4039 * 0 is returned if another key is found, < 0 if there are any errors
4040 * and 1 is returned if there are no higher keys in the tree
4042 * path->keep_locks should be set to 1 on the search made before
4043 * calling this function.
4045 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4046 struct btrfs_key *key, int level,
4047 int cache_only, u64 min_trans)
4050 struct extent_buffer *c;
4052 WARN_ON(!path->keep_locks);
4053 while (level < BTRFS_MAX_LEVEL) {
4054 if (!path->nodes[level])
4057 slot = path->slots[level] + 1;
4058 c = path->nodes[level];
4060 if (slot >= btrfs_header_nritems(c)) {
4063 struct btrfs_key cur_key;
4064 if (level + 1 >= BTRFS_MAX_LEVEL ||
4065 !path->nodes[level + 1])
4068 if (path->locks[level + 1]) {
4073 slot = btrfs_header_nritems(c) - 1;
4075 btrfs_item_key_to_cpu(c, &cur_key, slot);
4077 btrfs_node_key_to_cpu(c, &cur_key, slot);
4079 orig_lowest = path->lowest_level;
4080 btrfs_release_path(root, path);
4081 path->lowest_level = level;
4082 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4084 path->lowest_level = orig_lowest;
4088 c = path->nodes[level];
4089 slot = path->slots[level];
4096 btrfs_item_key_to_cpu(c, key, slot);
4098 u64 blockptr = btrfs_node_blockptr(c, slot);
4099 u64 gen = btrfs_node_ptr_generation(c, slot);
4102 struct extent_buffer *cur;
4103 cur = btrfs_find_tree_block(root, blockptr,
4104 btrfs_level_size(root, level - 1));
4105 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
4108 free_extent_buffer(cur);
4111 free_extent_buffer(cur);
4113 if (gen < min_trans) {
4117 btrfs_node_key_to_cpu(c, key, slot);
4125 * search the tree again to find a leaf with greater keys
4126 * returns 0 if it found something or 1 if there are no greater leaves.
4127 * returns < 0 on io errors.
4129 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4133 struct extent_buffer *c;
4134 struct extent_buffer *next;
4135 struct btrfs_key key;
4138 int old_spinning = path->leave_spinning;
4139 int force_blocking = 0;
4141 nritems = btrfs_header_nritems(path->nodes[0]);
4146 * we take the blocks in an order that upsets lockdep. Using
4147 * blocking mode is the only way around it.
4149 #ifdef CONFIG_DEBUG_LOCK_ALLOC
4153 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4157 btrfs_release_path(root, path);
4159 path->keep_locks = 1;
4161 if (!force_blocking)
4162 path->leave_spinning = 1;
4164 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4165 path->keep_locks = 0;
4170 nritems = btrfs_header_nritems(path->nodes[0]);
4172 * by releasing the path above we dropped all our locks. A balance
4173 * could have added more items next to the key that used to be
4174 * at the very end of the block. So, check again here and
4175 * advance the path if there are now more items available.
4177 if (nritems > 0 && path->slots[0] < nritems - 1) {
4184 while (level < BTRFS_MAX_LEVEL) {
4185 if (!path->nodes[level]) {
4190 slot = path->slots[level] + 1;
4191 c = path->nodes[level];
4192 if (slot >= btrfs_header_nritems(c)) {
4194 if (level == BTRFS_MAX_LEVEL) {
4202 btrfs_tree_unlock(next);
4203 free_extent_buffer(next);
4207 ret = read_block_for_search(NULL, root, path, &next, level,
4213 btrfs_release_path(root, path);
4217 if (!path->skip_locking) {
4218 ret = btrfs_try_spin_lock(next);
4220 btrfs_set_path_blocking(path);
4221 btrfs_tree_lock(next);
4222 if (!force_blocking)
4223 btrfs_clear_path_blocking(path, next);
4226 btrfs_set_lock_blocking(next);
4230 path->slots[level] = slot;
4233 c = path->nodes[level];
4234 if (path->locks[level])
4235 btrfs_tree_unlock(c);
4237 free_extent_buffer(c);
4238 path->nodes[level] = next;
4239 path->slots[level] = 0;
4240 if (!path->skip_locking)
4241 path->locks[level] = 1;
4246 ret = read_block_for_search(NULL, root, path, &next, level,
4252 btrfs_release_path(root, path);
4256 if (!path->skip_locking) {
4257 btrfs_assert_tree_locked(path->nodes[level]);
4258 ret = btrfs_try_spin_lock(next);
4260 btrfs_set_path_blocking(path);
4261 btrfs_tree_lock(next);
4262 if (!force_blocking)
4263 btrfs_clear_path_blocking(path, next);
4266 btrfs_set_lock_blocking(next);
4271 unlock_up(path, 0, 1);
4272 path->leave_spinning = old_spinning;
4274 btrfs_set_path_blocking(path);
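/*
 * Editor's illustrative sketch, not part of the original file: the usual
 * read-only walk over every item starting at a given key, calling
 * btrfs_next_leaf() whenever the current leaf runs out of items.  The
 * function name, starting key and the empty per-item step are
 * hypothetical.
 */
static int example_walk_items(struct btrfs_root *root,
			      struct btrfs_key *start_key)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, start_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* past the last leaf */
				ret = 0;
				goto out;
			}
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* ... look at the item for found_key here ... */

		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret;
}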
4280 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4281 * searching until it gets past min_objectid or finds an item of 'type'
4283 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4285 int btrfs_previous_item(struct btrfs_root *root,
4286 struct btrfs_path *path, u64 min_objectid,
4289 struct btrfs_key found_key;
4290 struct extent_buffer *leaf;
4295 if (path->slots[0] == 0) {
4296 btrfs_set_path_blocking(path);
4297 ret = btrfs_prev_leaf(root, path);
4303 leaf = path->nodes[0];
4304 nritems = btrfs_header_nritems(leaf);
4307 if (path->slots[0] == nritems)
4310 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4311 if (found_key.type == type)
4313 if (found_key.objectid < min_objectid)
4315 if (found_key.objectid == min_objectid &&
4316 found_key.type < type)