/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "ref-cache.h"
#include "tree-log.h"

static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

#define BTRFS_ROOT_TRANS_TAG 0

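/*
 * drop one reference on a transaction.  When the last reference goes away,
 * the struct is removed from the global transaction list and freed.
 */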
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(transaction->use_count == 0);
	transaction->use_count--;
	if (transaction->use_count == 0) {
		WARN_ON(total_trans == 0);
		total_trans--;
		list_del_init(&transaction->list);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;
	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		BUG_ON(!cur_trans);
		total_trans++;
		root->fs_info->generation++;
		root->fs_info->last_alloc = 0;
		root->fs_info->last_data_alloc = 0;
		cur_trans->num_writers = 1;
		cur_trans->num_joined = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->blocked = 0;
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();
		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		extent_io_tree_init(&cur_trans->dirty_pages,
				    root->fs_info->btree_inode->i_mapping,
				    GFP_NOFS);
		spin_lock(&root->fs_info->new_trans_lock);
		root->fs_info->running_transaction = cur_trans;
		spin_unlock(&root->fs_info->new_trans_lock);
	} else {
		cur_trans->num_writers++;
		cur_trans->num_joined++;
	}

	return 0;
}

/*
 * this does all the record keeping required to make sure that a
 * reference counted root is properly recorded in a given transaction.
 * This is required to make sure the old root from before we joined the
 * transaction is deleted when the transaction commits.
 */
noinline int btrfs_record_root_in_trans(struct btrfs_root *root)
{
	struct btrfs_dirty_root *dirty;
	u64 running_trans_id = root->fs_info->running_transaction->transid;
	if (root->ref_cows && root->last_trans < running_trans_id) {
		WARN_ON(root == root->fs_info->extent_root);
		if (root->root_item.refs != 0) {
			radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);

			dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
			BUG_ON(!dirty);
			dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
			BUG_ON(!dirty->root);
			dirty->latest_root = root;
			INIT_LIST_HEAD(&dirty->list);

			root->commit_root = btrfs_root_node(root);

			memcpy(dirty->root, root, sizeof(*root));
			spin_lock_init(&dirty->root->node_lock);
			spin_lock_init(&dirty->root->list_lock);
			mutex_init(&dirty->root->objectid_mutex);
			mutex_init(&dirty->root->log_mutex);
			INIT_LIST_HEAD(&dirty->root->dead_list);
			dirty->root->node = root->commit_root;
			dirty->root->commit_root = NULL;

			spin_lock(&root->list_lock);
			list_add(&dirty->root->dead_list, &root->dead_list);
			spin_unlock(&root->list_lock);

			root->dirty_root = dirty;
		} else {
			WARN_ON(1);
		}
		root->last_trans = running_trans_id;
	}
	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		cur_trans->use_count++;
		while(1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (cur_trans->blocked) {
				mutex_unlock(&root->fs_info->trans_mutex);
				schedule();
				mutex_lock(&root->fs_info->trans_mutex);
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
			} else {
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
				break;
			}
		}
		put_transaction(cur_trans);
	}
}

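/*
 * start a new transaction handle.  wait == 0 joins whatever transaction is
 * running without blocking, wait == 1 first waits out any commit in progress
 * unless a long-running ioctl transaction is open, and wait == 2 always
 * waits for the running transaction to unblock (except during log recovery).
 */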
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    int num_blocks, int wait)
{
	struct btrfs_trans_handle *h =
		kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	int ret;

	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->log_root_recovering &&
	    ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
		wait_current_trans(root);
	ret = join_transaction(root);
	BUG_ON(ret);

	btrfs_record_root_in_trans(root);
	h->transid = root->fs_info->running_transaction->transid;
	h->transaction = root->fs_info->running_transaction;
	h->blocks_reserved = num_blocks;
	h->blocks_used = 0;
	h->block_group = NULL;
	h->alloc_exclude_nr = 0;
	h->alloc_exclude_start = 0;
	root->fs_info->running_transaction->use_count++;
	mutex_unlock(&root->fs_info->trans_mutex);
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_blocks)
{
	return start_transaction(root, num_blocks, 1);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, num_blocks, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
							 int num_blocks)
{
	return start_transaction(r, num_blocks, 2);
}

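/*
 * Typical caller pattern (an illustrative sketch, not code from this file;
 * the callee name is hypothetical):
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	ret = btrfs_some_tree_operation(trans, root);
 *	btrfs_end_transaction(trans, root);
 *
 * btrfs_join_transaction is the same, except it never waits out a commit in
 * progress, so it is suitable for paths the committing thread may itself be
 * waiting on.
 */
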
/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while(!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}

/*
 * rate limit against the drop_snapshot code.  This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	int harder_count = 0;

harder:
	if (atomic_read(&info->throttles)) {
		DEFINE_WAIT(wait);
		int thr;
		thr = atomic_read(&info->throttle_gen);

		do {
			prepare_to_wait(&info->transaction_throttle,
					&wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&info->throttles)) {
				finish_wait(&info->transaction_throttle, &wait);
				break;
			}
			schedule();
			finish_wait(&info->transaction_throttle, &wait);
		} while (thr == atomic_read(&info->throttle_gen));
		harder_count++;

		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
		    harder_count < 2)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
		    harder_count < 10)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
		    harder_count < 20)
			goto harder;
	}
}

void btrfs_throttle(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->open_ioctl_trans)
		wait_current_trans(root);
	mutex_unlock(&root->fs_info->trans_mutex);

	throttle_on_drops(root);
}

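/*
 * drop a transaction handle.  The transaction loses one writer; the last
 * writer to leave wakes up anyone waiting in the commit path.  With
 * throttle set, the caller is also slowed down behind pending snapshot
 * deletions.
 */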
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *info = root->fs_info;

	mutex_lock(&info->trans_mutex);
	cur_trans = info->running_transaction;
	WARN_ON(cur_trans != trans->transaction);
	WARN_ON(cur_trans->num_writers < 1);
	cur_trans->num_writers--;

	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);
	mutex_unlock(&info->trans_mutex);
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		throttle_on_drops(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while(1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		while(start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	while(1) {
		ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
		while(start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	btrfs_extent_post_op(trans, root);
	btrfs_write_dirty_block_groups(trans, root);
	btrfs_extent_post_op(trans, root);

	while(1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start)
			break;
		btrfs_set_root_bytenr(&root->root_item,
				      root->node->start);
		btrfs_set_root_level(&root->root_item,
				     btrfs_header_level(root->node));
		btrfs_set_root_generation(&root->root_item, trans->transid);

		btrfs_extent_post_op(trans, root);

		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);
		btrfs_write_dirty_block_groups(trans, root);
		btrfs_extent_post_op(trans, root);
	}
	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;

	btrfs_extent_post_op(trans, fs_info->tree_root);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb, 0);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	btrfs_extent_post_op(trans, fs_info->tree_root);

	while(!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}
	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest)
{
	struct btrfs_dirty_root *dirty;

	dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
	if (!dirty)
		return -ENOMEM;
	dirty->root = root;
	dirty->latest_root = latest;

	mutex_lock(&root->fs_info->trans_mutex);
	list_add(&dirty->list, &latest->fs_info->dead_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}

/*
 * at transaction commit time we need to schedule the old roots for
 * deletion via btrfs_drop_snapshot.  This runs through all the
 * reference counted roots that were modified in the current
 * transaction and puts them into the drop list
 */
static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
				    struct radix_tree_root *radix,
				    struct list_head *list)
{
	struct btrfs_dirty_root *dirty;
	struct btrfs_root *gang[8];
	struct btrfs_root *root;
	int i;
	int ret;
	int err = 0;
	u32 refs;

	while(1) {
		ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(radix,
				     (unsigned long)root->root_key.objectid,
				     BTRFS_ROOT_TRANS_TAG);

			BUG_ON(!root->ref_tree);
			dirty = root->dirty_root;

			btrfs_free_log(trans, root);
			btrfs_free_reloc_root(trans, root);

			if (root->commit_root == root->node) {
				WARN_ON(root->node->start !=
					btrfs_root_bytenr(&root->root_item));

				free_extent_buffer(root->commit_root);
				root->commit_root = NULL;
				root->dirty_root = NULL;

				spin_lock(&root->list_lock);
				list_del_init(&dirty->root->dead_list);
				spin_unlock(&root->list_lock);

				kfree(dirty->root);
				kfree(dirty);

				/* make sure to update the root on disk
				 * so we get any updates to the block used
				 * counts
				 */
				err = btrfs_update_root(trans,
						root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
				continue;
			}

			memset(&root->root_item.drop_progress, 0,
			       sizeof(struct btrfs_disk_key));
			root->root_item.drop_level = 0;
			root->commit_root = NULL;
			root->dirty_root = NULL;
			root->root_key.offset = root->fs_info->generation;
			btrfs_set_root_bytenr(&root->root_item,
					      root->node->start);
			btrfs_set_root_level(&root->root_item,
					     btrfs_header_level(root->node));
			btrfs_set_root_generation(&root->root_item,
						  root->root_key.offset);

			err = btrfs_insert_root(trans, root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;

			refs = btrfs_root_refs(&dirty->root->root_item);
			btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
			err = btrfs_update_root(trans, root->fs_info->tree_root,
						&dirty->root->root_key,
						&dirty->root->root_item);

			BUG_ON(err);
			if (refs == 1) {
				list_add(&dirty->list, list);
			} else {
				WARN_ON(1);
				free_extent_buffer(dirty->root->node);
				kfree(dirty->root);
				kfree(dirty);
			}
		}
	}
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;

	smp_mb();
	if (root->defrag_running)
		return 0;
	trans = btrfs_start_transaction(root, 1);
	while(1) {
		root->defrag_running = 1;
		ret = btrfs_defrag_leaves(trans, root, cacheonly);
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		trans = btrfs_start_transaction(root, 1);
		if (root->fs_info->closing || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	smp_mb();
	btrfs_end_transaction(trans, root);
	return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
				     struct list_head *list)
{
	struct btrfs_dirty_root *dirty;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 bytes_used;
	u64 max_useless;
	unsigned long nr;
	int ret = 0;
	int err;

	while(!list_empty(list)) {
		struct btrfs_root *root;

		dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
		list_del_init(&dirty->list);

		num_bytes = btrfs_root_used(&dirty->root->root_item);
		root = dirty->latest_root;
		atomic_inc(&root->fs_info->throttles);

		while(1) {
			trans = btrfs_start_transaction(tree_root, 1);
			mutex_lock(&root->fs_info->drop_mutex);
			ret = btrfs_drop_snapshot(trans, dirty->root);
			if (ret != -EAGAIN) {
				break;
			}
			mutex_unlock(&root->fs_info->drop_mutex);

			err = btrfs_update_root(trans,
					tree_root,
					&dirty->root->root_key,
					&dirty->root->root_item);
			if (err)
				ret = err;
			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, tree_root);
			BUG_ON(ret);

			btrfs_btree_balance_dirty(tree_root, nr);
			cond_resched();
		}
		BUG_ON(ret);
		atomic_dec(&root->fs_info->throttles);
		wake_up(&root->fs_info->transaction_throttle);

		num_bytes -= btrfs_root_used(&dirty->root->root_item);
		bytes_used = btrfs_root_used(&root->root_item);
		if (num_bytes) {
			btrfs_record_root_in_trans(root);
			btrfs_set_root_used(&root->root_item,
					    bytes_used - num_bytes);
		}

		ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
		if (ret) {
			BUG();
			break;
		}
		mutex_unlock(&root->fs_info->drop_mutex);

		spin_lock(&root->list_lock);
		list_del_init(&dirty->root->dead_list);
		if (!list_empty(&root->dead_list)) {
			struct btrfs_root *oldest;
			oldest = list_entry(root->dead_list.prev,
					    struct btrfs_root, dead_list);
			max_useless = oldest->root_key.offset - 1;
		} else {
			max_useless = root->root_key.offset - 1;
		}
		spin_unlock(&root->list_lock);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		ret = btrfs_remove_leaf_refs(root, max_useless, 0);
		BUG_ON(ret);

		free_extent_buffer(dirty->root->node);
		kfree(dirty->root);
		kfree(dirty);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 objectid;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret)
		goto fail;

	btrfs_record_root_in_trans(root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

	key.objectid = objectid;
	key.offset = trans->transid;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old, 0);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_bytenr(new_root_item, tmp->start);
	btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
	btrfs_set_root_generation(new_root_item, trans->transid);
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	memcpy(&pending->root_key, &key, sizeof(key));
fail:
	kfree(new_root_item);
	return ret;
}

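/*
 * the directory entry and the root back references for a new snapshot are
 * inserted in a transaction of their own, after the commit that created
 * the snapshot root is fully on disk.
 */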
static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	int ret;
	int namelen;
	u64 index = 0;
	struct btrfs_trans_handle *trans;
	struct inode *parent_inode;
	struct inode *inode;
	struct btrfs_root *parent_root;

	parent_inode = pending->dentry->d_parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	trans = btrfs_start_transaction(parent_root, 1);

	/*
	 * insert the directory item
	 */
	namelen = strlen(pending->name);
	ret = btrfs_set_inode_index(parent_inode, &index);
	ret = btrfs_insert_dir_item(trans, parent_root,
				    pending->name, namelen,
				    parent_inode->i_ino,
				    &pending->root_key, BTRFS_FT_DIR, index);
	if (ret)
		goto fail;

	/* add the backref first */
	ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
				 pending->root_key.objectid,
				 BTRFS_ROOT_BACKREF_KEY,
				 parent_root->root_key.objectid,
				 parent_inode->i_ino, index, pending->name,
				 namelen);
	BUG_ON(ret);

	/* now add the forward ref */
	ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
				 parent_root->root_key.objectid,
				 BTRFS_ROOT_REF_KEY,
				 pending->root_key.objectid,
				 parent_inode->i_ino, index, pending->name,
				 namelen);

	inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
	d_instantiate(pending->dentry, inode);
fail:
	btrfs_end_transaction(trans, fs_info->fs_root);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	struct list_head *cur;
	int ret;

	list_for_each(cur, head) {
		pending = list_entry(cur, struct btrfs_pending_snapshot, list);
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

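/*
 * run through the pending snapshots again after the commit and do the
 * directory inserts, freeing each pending entry as it is finished.
 */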
static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	while(!list_empty(head)) {
		pending = list_entry(head->next,
				     struct btrfs_pending_snapshot, list);
		ret = finish_pending_snapshot(fs_info, pending);
		BUG_ON(ret);
		list_del(&pending->list);
		kfree(pending->name);
		kfree(pending);
	}
	return 0;
}

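/*
 * commit the currently running transaction: flush the dirty tree blocks,
 * write out the new tree roots, and make everything durable with a super
 * block update.  If another commit is already in progress, just wait for
 * it instead.
 */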
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	unsigned long timeout = 1;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct list_head dirty_fs_roots;
	struct extent_io_tree *pinned_copy;
	DEFINE_WAIT(wait);
	int ret;

	INIT_LIST_HEAD(&dirty_fs_roots);
	mutex_lock(&root->fs_info->trans_mutex);
	if (trans->transaction->in_commit) {
		cur_trans = trans->transaction;
		trans->transaction->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		mutex_lock(&root->fs_info->trans_mutex);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);

		return 0;
	}

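	/*
	 * pinned_copy holds a private copy of the pinned extent state so
	 * the extents freed by this commit can be unpinned once the new
	 * super block is safely on disk.
	 */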
	pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
	if (!pinned_copy)
		return -ENOMEM;

	extent_io_tree_init(pinned_copy,
			    root->fs_info->btree_inode->i_mapping, GFP_NOFS);

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	cur_trans = trans->transaction;
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);

			mutex_lock(&root->fs_info->trans_mutex);
			put_transaction(prev_trans);
		}
	}

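	/*
	 * wait for all the writers on this transaction to finish, and keep
	 * waiting while new writers join, so the commit starts from a
	 * settled view of the transaction.
	 */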
	do {
		int snap_pending = 0;
		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);
		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (cur_trans->num_writers > 1)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = 1;

		mutex_unlock(&root->fs_info->trans_mutex);

		if (snap_pending) {
			ret = btrfs_wait_ordered_extents(root, 1);
			BUG_ON(ret);
		}

		schedule_timeout(timeout);

		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&cur_trans->writer_wait, &wait);
	} while (cur_trans->num_writers > 1 ||
		 (cur_trans->num_joined != joined));

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	WARN_ON(cur_trans != trans->transaction);

	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);
	/*
	 * keep tree reloc code from adding new reloc trees
	 */
	mutex_lock(&root->fs_info->tree_reloc_mutex);

	ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
			      &dirty_fs_roots);
	BUG_ON(ret);

	/* add_dirty_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = btrfs_commit_tree_roots(trans, root);
	BUG_ON(ret);

	cur_trans = root->fs_info->running_transaction;
	spin_lock(&root->fs_info->new_trans_lock);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->new_trans_lock);
	btrfs_set_super_generation(&root->fs_info->super_copy,
				   cur_trans->transid);
	btrfs_set_super_root(&root->fs_info->super_copy,
			     root->fs_info->tree_root->node->start);
	btrfs_set_super_root_level(&root->fs_info->super_copy,
			   btrfs_header_level(root->fs_info->tree_root->node));

	btrfs_set_super_chunk_root(&root->fs_info->super_copy,
				   chunk_root->node->start);
	btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
					 btrfs_header_level(chunk_root->node));
	btrfs_set_super_chunk_root_generation(&root->fs_info->super_copy,
				btrfs_header_generation(chunk_root->node));

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	btrfs_copy_pinned(root, pinned_copy);

	trans->transaction->blocked = 0;
	wake_up(&root->fs_info->transaction_throttle);
	wake_up(&root->fs_info->transaction_wait);

	mutex_unlock(&root->fs_info->trans_mutex);
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root, pinned_copy);
	kfree(pinned_copy);

	btrfs_drop_dead_reloc_roots(root);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	/* do the directory inserts of any pending snapshot creations */
	finish_pending_snapshots(trans, root->fs_info);

	mutex_lock(&root->fs_info->trans_mutex);

	cur_trans->commit_done = 1;
	root->fs_info->last_trans_committed = cur_trans->transid;
	wake_up(&cur_trans->commit_wait);

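	/*
	 * drop the transaction twice: once for the reference this handle
	 * took in start_transaction (the handle is freed directly below
	 * instead of going through btrfs_end_transaction), and once for
	 * the reference fs_info->running_transaction held until it was
	 * cleared above.
	 */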
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
	if (root->fs_info->closing)
		list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);

	mutex_unlock(&root->fs_info->trans_mutex);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (root->fs_info->closing) {
		drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
	}
	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for
 * deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	struct list_head dirty_roots;
	INIT_LIST_HEAD(&dirty_roots);
again:
	mutex_lock(&root->fs_info->trans_mutex);
	list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
	mutex_unlock(&root->fs_info->trans_mutex);

	if (!list_empty(&dirty_roots)) {
		drop_dirty_roots(root, &dirty_roots);
		goto again;
	}
	return 0;
}