hugetlbfs: return negative error code for bad mount option
[linux-2.6] / fs / btrfs / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include <linux/rcupdate.h>
24 #include "compat.h"
25 #include "hash.h"
26 #include "crc32c.h"
27 #include "ctree.h"
28 #include "disk-io.h"
29 #include "print-tree.h"
30 #include "transaction.h"
31 #include "volumes.h"
32 #include "locking.h"
33 #include "ref-cache.h"
34 #include "free-space-cache.h"
35
36 #define PENDING_EXTENT_INSERT 0
37 #define PENDING_EXTENT_DELETE 1
38 #define PENDING_BACKREF_UPDATE 2
39
40 struct pending_extent_op {
41         int type;
42         u64 bytenr;
43         u64 num_bytes;
44         u64 parent;
45         u64 orig_parent;
46         u64 generation;
47         u64 orig_generation;
48         int level;
49         struct list_head list;
50         int del;
51 };
52
53 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
54                                          struct btrfs_root *root, u64 parent,
55                                          u64 root_objectid, u64 ref_generation,
56                                          u64 owner, struct btrfs_key *ins,
57                                          int ref_mod);
58 static int update_reserved_extents(struct btrfs_root *root,
59                                    u64 bytenr, u64 num, int reserve);
60 static int update_block_group(struct btrfs_trans_handle *trans,
61                               struct btrfs_root *root,
62                               u64 bytenr, u64 num_bytes, int alloc,
63                               int mark_free);
64 static noinline int __btrfs_free_extent(struct btrfs_trans_handle *trans,
65                                         struct btrfs_root *root,
66                                         u64 bytenr, u64 num_bytes, u64 parent,
67                                         u64 root_objectid, u64 ref_generation,
68                                         u64 owner_objectid, int pin,
69                                         int ref_to_drop);
70
71 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
72                           struct btrfs_root *extent_root, u64 alloc_bytes,
73                           u64 flags, int force);
74
75 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
76 {
77         return (cache->flags & bits) == bits;
78 }
79
80 /*
81  * this adds the block group to the fs_info rb tree for the block group
82  * cache
83  */
84 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
85                                 struct btrfs_block_group_cache *block_group)
86 {
87         struct rb_node **p;
88         struct rb_node *parent = NULL;
89         struct btrfs_block_group_cache *cache;
90
91         spin_lock(&info->block_group_cache_lock);
92         p = &info->block_group_cache_tree.rb_node;
93
94         while (*p) {
95                 parent = *p;
96                 cache = rb_entry(parent, struct btrfs_block_group_cache,
97                                  cache_node);
98                 if (block_group->key.objectid < cache->key.objectid) {
99                         p = &(*p)->rb_left;
100                 } else if (block_group->key.objectid > cache->key.objectid) {
101                         p = &(*p)->rb_right;
102                 } else {
103                         spin_unlock(&info->block_group_cache_lock);
104                         return -EEXIST;
105                 }
106         }
107
108         rb_link_node(&block_group->cache_node, parent, p);
109         rb_insert_color(&block_group->cache_node,
110                         &info->block_group_cache_tree);
111         spin_unlock(&info->block_group_cache_lock);
112
113         return 0;
114 }
115
116 /*
117  * This will return the block group at or after bytenr if contains is 0, else
118  * it will return the block group that contains the bytenr
119  */
120 static struct btrfs_block_group_cache *
121 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
122                               int contains)
123 {
124         struct btrfs_block_group_cache *cache, *ret = NULL;
125         struct rb_node *n;
126         u64 end, start;
127
128         spin_lock(&info->block_group_cache_lock);
129         n = info->block_group_cache_tree.rb_node;
130
131         while (n) {
132                 cache = rb_entry(n, struct btrfs_block_group_cache,
133                                  cache_node);
134                 end = cache->key.objectid + cache->key.offset - 1;
135                 start = cache->key.objectid;
136
137                 if (bytenr < start) {
138                         if (!contains && (!ret || start < ret->key.objectid))
139                                 ret = cache;
140                         n = n->rb_left;
141                 } else if (bytenr > start) {
142                         if (contains && bytenr <= end) {
143                                 ret = cache;
144                                 break;
145                         }
146                         n = n->rb_right;
147                 } else {
148                         ret = cache;
149                         break;
150                 }
151         }
152         if (ret)
153                 atomic_inc(&ret->count);
154         spin_unlock(&info->block_group_cache_lock);
155
156         return ret;
157 }
158
159 /*
160  * this is only called by cache_block_group, since we could have freed extents
161  * we need to check the pinned_extents for any extents that can't be used yet
162  * since their free space will be released as soon as the transaction commits.
163  */
164 static int add_new_free_space(struct btrfs_block_group_cache *block_group,
165                               struct btrfs_fs_info *info, u64 start, u64 end)
166 {
167         u64 extent_start, extent_end, size;
168         int ret;
169
170         while (start < end) {
171                 ret = find_first_extent_bit(&info->pinned_extents, start,
172                                             &extent_start, &extent_end,
173                                             EXTENT_DIRTY);
174                 if (ret)
175                         break;
176
177                 if (extent_start == start) {
178                         start = extent_end + 1;
179                 } else if (extent_start > start && extent_start < end) {
180                         size = extent_start - start;
181                         ret = btrfs_add_free_space(block_group, start,
182                                                    size);
183                         BUG_ON(ret);
184                         start = extent_end + 1;
185                 } else {
186                         break;
187                 }
188         }
189
190         if (start < end) {
191                 size = end - start;
192                 ret = btrfs_add_free_space(block_group, start, size);
193                 BUG_ON(ret);
194         }
195
196         return 0;
197 }
198
199 static int remove_sb_from_cache(struct btrfs_root *root,
200                                 struct btrfs_block_group_cache *cache)
201 {
202         u64 bytenr;
203         u64 *logical;
204         int stripe_len;
205         int i, nr, ret;
206
207         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
208                 bytenr = btrfs_sb_offset(i);
209                 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
210                                        cache->key.objectid, bytenr, 0,
211                                        &logical, &nr, &stripe_len);
212                 BUG_ON(ret);
213                 while (nr--) {
214                         btrfs_remove_free_space(cache, logical[nr],
215                                                 stripe_len);
216                 }
217                 kfree(logical);
218         }
219         return 0;
220 }
221
222 static int cache_block_group(struct btrfs_root *root,
223                              struct btrfs_block_group_cache *block_group)
224 {
225         struct btrfs_path *path;
226         int ret = 0;
227         struct btrfs_key key;
228         struct extent_buffer *leaf;
229         int slot;
230         u64 last;
231
232         if (!block_group)
233                 return 0;
234
235         root = root->fs_info->extent_root;
236
237         if (block_group->cached)
238                 return 0;
239
240         path = btrfs_alloc_path();
241         if (!path)
242                 return -ENOMEM;
243
244         path->reada = 2;
245         /*
246          * we get into deadlocks with paths held by callers of this function.
247          * since the alloc_mutex is protecting things right now, just
248          * skip the locking here
249          */
250         path->skip_locking = 1;
251         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
252         key.objectid = last;
253         key.offset = 0;
254         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
255         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
256         if (ret < 0)
257                 goto err;
258
259         while (1) {
260                 leaf = path->nodes[0];
261                 slot = path->slots[0];
262                 if (slot >= btrfs_header_nritems(leaf)) {
263                         ret = btrfs_next_leaf(root, path);
264                         if (ret < 0)
265                                 goto err;
266                         if (ret == 0)
267                                 continue;
268                         else
269                                 break;
270                 }
271                 btrfs_item_key_to_cpu(leaf, &key, slot);
272                 if (key.objectid < block_group->key.objectid)
273                         goto next;
274
275                 if (key.objectid >= block_group->key.objectid +
276                     block_group->key.offset)
277                         break;
278
279                 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
280                         add_new_free_space(block_group, root->fs_info, last,
281                                            key.objectid);
282
283                         last = key.objectid + key.offset;
284                 }
285 next:
286                 path->slots[0]++;
287         }
288
289         add_new_free_space(block_group, root->fs_info, last,
290                            block_group->key.objectid +
291                            block_group->key.offset);
292
293         block_group->cached = 1;
294         remove_sb_from_cache(root, block_group);
295         ret = 0;
296 err:
297         btrfs_free_path(path);
298         return ret;
299 }
300
301 /*
302  * return the block group that starts at or after bytenr
303  */
304 static struct btrfs_block_group_cache *
305 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
306 {
307         struct btrfs_block_group_cache *cache;
308
309         cache = block_group_cache_tree_search(info, bytenr, 0);
310
311         return cache;
312 }
313
314 /*
315  * return the block group that contains teh given bytenr
316  */
317 struct btrfs_block_group_cache *btrfs_lookup_block_group(
318                                                  struct btrfs_fs_info *info,
319                                                  u64 bytenr)
320 {
321         struct btrfs_block_group_cache *cache;
322
323         cache = block_group_cache_tree_search(info, bytenr, 1);
324
325         return cache;
326 }
327
328 void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
329 {
330         if (atomic_dec_and_test(&cache->count))
331                 kfree(cache);
332 }
333
334 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
335                                                   u64 flags)
336 {
337         struct list_head *head = &info->space_info;
338         struct btrfs_space_info *found;
339
340         rcu_read_lock();
341         list_for_each_entry_rcu(found, head, list) {
342                 if (found->flags == flags) {
343                         rcu_read_unlock();
344                         return found;
345                 }
346         }
347         rcu_read_unlock();
348         return NULL;
349 }
350
351 /*
352  * after adding space to the filesystem, we need to clear the full flags
353  * on all the space infos.
354  */
355 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
356 {
357         struct list_head *head = &info->space_info;
358         struct btrfs_space_info *found;
359
360         rcu_read_lock();
361         list_for_each_entry_rcu(found, head, list)
362                 found->full = 0;
363         rcu_read_unlock();
364 }
365
366 static u64 div_factor(u64 num, int factor)
367 {
368         if (factor == 10)
369                 return num;
370         num *= factor;
371         do_div(num, 10);
372         return num;
373 }
374
375 u64 btrfs_find_block_group(struct btrfs_root *root,
376                            u64 search_start, u64 search_hint, int owner)
377 {
378         struct btrfs_block_group_cache *cache;
379         u64 used;
380         u64 last = max(search_hint, search_start);
381         u64 group_start = 0;
382         int full_search = 0;
383         int factor = 9;
384         int wrapped = 0;
385 again:
386         while (1) {
387                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
388                 if (!cache)
389                         break;
390
391                 spin_lock(&cache->lock);
392                 last = cache->key.objectid + cache->key.offset;
393                 used = btrfs_block_group_used(&cache->item);
394
395                 if ((full_search || !cache->ro) &&
396                     block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
397                         if (used + cache->pinned + cache->reserved <
398                             div_factor(cache->key.offset, factor)) {
399                                 group_start = cache->key.objectid;
400                                 spin_unlock(&cache->lock);
401                                 btrfs_put_block_group(cache);
402                                 goto found;
403                         }
404                 }
405                 spin_unlock(&cache->lock);
406                 btrfs_put_block_group(cache);
407                 cond_resched();
408         }
409         if (!wrapped) {
410                 last = search_start;
411                 wrapped = 1;
412                 goto again;
413         }
414         if (!full_search && factor < 10) {
415                 last = search_start;
416                 full_search = 1;
417                 factor = 10;
418                 goto again;
419         }
420 found:
421         return group_start;
422 }
423
424 /* simple helper to search for an existing extent at a given offset */
425 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
426 {
427         int ret;
428         struct btrfs_key key;
429         struct btrfs_path *path;
430
431         path = btrfs_alloc_path();
432         BUG_ON(!path);
433         key.objectid = start;
434         key.offset = len;
435         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
436         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
437                                 0, 0);
438         btrfs_free_path(path);
439         return ret;
440 }
441
442 /*
443  * Back reference rules.  Back refs have three main goals:
444  *
445  * 1) differentiate between all holders of references to an extent so that
446  *    when a reference is dropped we can make sure it was a valid reference
447  *    before freeing the extent.
448  *
449  * 2) Provide enough information to quickly find the holders of an extent
450  *    if we notice a given block is corrupted or bad.
451  *
452  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
453  *    maintenance.  This is actually the same as #2, but with a slightly
454  *    different use case.
455  *
456  * File extents can be referenced by:
457  *
458  * - multiple snapshots, subvolumes, or different generations in one subvol
459  * - different files inside a single subvolume
460  * - different offsets inside a file (bookend extents in file.c)
461  *
462  * The extent ref structure has fields for:
463  *
464  * - Objectid of the subvolume root
465  * - Generation number of the tree holding the reference
466  * - objectid of the file holding the reference
467  * - number of references holding by parent node (alway 1 for tree blocks)
468  *
469  * Btree leaf may hold multiple references to a file extent. In most cases,
470  * these references are from same file and the corresponding offsets inside
471  * the file are close together.
472  *
473  * When a file extent is allocated the fields are filled in:
474  *     (root_key.objectid, trans->transid, inode objectid, 1)
475  *
476  * When a leaf is cow'd new references are added for every file extent found
477  * in the leaf.  It looks similar to the create case, but trans->transid will
478  * be different when the block is cow'd.
479  *
480  *     (root_key.objectid, trans->transid, inode objectid,
481  *      number of references in the leaf)
482  *
483  * When a file extent is removed either during snapshot deletion or
484  * file truncation, we find the corresponding back reference and check
485  * the following fields:
486  *
487  *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
488  *      inode objectid)
489  *
490  * Btree extents can be referenced by:
491  *
492  * - Different subvolumes
493  * - Different generations of the same subvolume
494  *
495  * When a tree block is created, back references are inserted:
496  *
497  * (root->root_key.objectid, trans->transid, level, 1)
498  *
499  * When a tree block is cow'd, new back references are added for all the
500  * blocks it points to. If the tree block isn't in reference counted root,
501  * the old back references are removed. These new back references are of
502  * the form (trans->transid will have increased since creation):
503  *
504  * (root->root_key.objectid, trans->transid, level, 1)
505  *
506  * When a backref is in deleting, the following fields are checked:
507  *
508  * if backref was for a tree root:
509  *     (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
510  * else
511  *     (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
512  *
513  * Back Reference Key composing:
514  *
515  * The key objectid corresponds to the first byte in the extent, the key
516  * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
517  * byte of parent extent. If a extent is tree root, the key offset is set
518  * to the key objectid.
519  */
520
521 static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
522                                           struct btrfs_root *root,
523                                           struct btrfs_path *path,
524                                           u64 bytenr, u64 parent,
525                                           u64 ref_root, u64 ref_generation,
526                                           u64 owner_objectid, int del)
527 {
528         struct btrfs_key key;
529         struct btrfs_extent_ref *ref;
530         struct extent_buffer *leaf;
531         u64 ref_objectid;
532         int ret;
533
534         key.objectid = bytenr;
535         key.type = BTRFS_EXTENT_REF_KEY;
536         key.offset = parent;
537
538         ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
539         if (ret < 0)
540                 goto out;
541         if (ret > 0) {
542                 ret = -ENOENT;
543                 goto out;
544         }
545
546         leaf = path->nodes[0];
547         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
548         ref_objectid = btrfs_ref_objectid(leaf, ref);
549         if (btrfs_ref_root(leaf, ref) != ref_root ||
550             btrfs_ref_generation(leaf, ref) != ref_generation ||
551             (ref_objectid != owner_objectid &&
552              ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
553                 ret = -EIO;
554                 WARN_ON(1);
555                 goto out;
556         }
557         ret = 0;
558 out:
559         return ret;
560 }
561
562 static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
563                                           struct btrfs_root *root,
564                                           struct btrfs_path *path,
565                                           u64 bytenr, u64 parent,
566                                           u64 ref_root, u64 ref_generation,
567                                           u64 owner_objectid,
568                                           int refs_to_add)
569 {
570         struct btrfs_key key;
571         struct extent_buffer *leaf;
572         struct btrfs_extent_ref *ref;
573         u32 num_refs;
574         int ret;
575
576         key.objectid = bytenr;
577         key.type = BTRFS_EXTENT_REF_KEY;
578         key.offset = parent;
579
580         ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
581         if (ret == 0) {
582                 leaf = path->nodes[0];
583                 ref = btrfs_item_ptr(leaf, path->slots[0],
584                                      struct btrfs_extent_ref);
585                 btrfs_set_ref_root(leaf, ref, ref_root);
586                 btrfs_set_ref_generation(leaf, ref, ref_generation);
587                 btrfs_set_ref_objectid(leaf, ref, owner_objectid);
588                 btrfs_set_ref_num_refs(leaf, ref, refs_to_add);
589         } else if (ret == -EEXIST) {
590                 u64 existing_owner;
591
592                 BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
593                 leaf = path->nodes[0];
594                 ref = btrfs_item_ptr(leaf, path->slots[0],
595                                      struct btrfs_extent_ref);
596                 if (btrfs_ref_root(leaf, ref) != ref_root ||
597                     btrfs_ref_generation(leaf, ref) != ref_generation) {
598                         ret = -EIO;
599                         WARN_ON(1);
600                         goto out;
601                 }
602
603                 num_refs = btrfs_ref_num_refs(leaf, ref);
604                 BUG_ON(num_refs == 0);
605                 btrfs_set_ref_num_refs(leaf, ref, num_refs + refs_to_add);
606
607                 existing_owner = btrfs_ref_objectid(leaf, ref);
608                 if (existing_owner != owner_objectid &&
609                     existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
610                         btrfs_set_ref_objectid(leaf, ref,
611                                         BTRFS_MULTIPLE_OBJECTIDS);
612                 }
613                 ret = 0;
614         } else {
615                 goto out;
616         }
617         btrfs_unlock_up_safe(path, 1);
618         btrfs_mark_buffer_dirty(path->nodes[0]);
619 out:
620         btrfs_release_path(root, path);
621         return ret;
622 }
623
624 static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
625                                           struct btrfs_root *root,
626                                           struct btrfs_path *path,
627                                           int refs_to_drop)
628 {
629         struct extent_buffer *leaf;
630         struct btrfs_extent_ref *ref;
631         u32 num_refs;
632         int ret = 0;
633
634         leaf = path->nodes[0];
635         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
636         num_refs = btrfs_ref_num_refs(leaf, ref);
637         BUG_ON(num_refs < refs_to_drop);
638         num_refs -= refs_to_drop;
639         if (num_refs == 0) {
640                 ret = btrfs_del_item(trans, root, path);
641         } else {
642                 btrfs_set_ref_num_refs(leaf, ref, num_refs);
643                 btrfs_mark_buffer_dirty(leaf);
644         }
645         btrfs_release_path(root, path);
646         return ret;
647 }
648
649 #ifdef BIO_RW_DISCARD
650 static void btrfs_issue_discard(struct block_device *bdev,
651                                 u64 start, u64 len)
652 {
653         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
654 }
655 #endif
656
657 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
658                                 u64 num_bytes)
659 {
660 #ifdef BIO_RW_DISCARD
661         int ret;
662         u64 map_length = num_bytes;
663         struct btrfs_multi_bio *multi = NULL;
664
665         /* Tell the block device(s) that the sectors can be discarded */
666         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
667                               bytenr, &map_length, &multi, 0);
668         if (!ret) {
669                 struct btrfs_bio_stripe *stripe = multi->stripes;
670                 int i;
671
672                 if (map_length > num_bytes)
673                         map_length = num_bytes;
674
675                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
676                         btrfs_issue_discard(stripe->dev->bdev,
677                                             stripe->physical,
678                                             map_length);
679                 }
680                 kfree(multi);
681         }
682
683         return ret;
684 #else
685         return 0;
686 #endif
687 }
688
689 static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
690                                      struct btrfs_root *root, u64 bytenr,
691                                      u64 num_bytes,
692                                      u64 orig_parent, u64 parent,
693                                      u64 orig_root, u64 ref_root,
694                                      u64 orig_generation, u64 ref_generation,
695                                      u64 owner_objectid)
696 {
697         int ret;
698         int pin = owner_objectid < BTRFS_FIRST_FREE_OBJECTID;
699
700         ret = btrfs_update_delayed_ref(trans, bytenr, num_bytes,
701                                        orig_parent, parent, orig_root,
702                                        ref_root, orig_generation,
703                                        ref_generation, owner_objectid, pin);
704         BUG_ON(ret);
705         return ret;
706 }
707
708 int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
709                             struct btrfs_root *root, u64 bytenr,
710                             u64 num_bytes, u64 orig_parent, u64 parent,
711                             u64 ref_root, u64 ref_generation,
712                             u64 owner_objectid)
713 {
714         int ret;
715         if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
716             owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
717                 return 0;
718
719         ret = __btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
720                                         orig_parent, parent, ref_root,
721                                         ref_root, ref_generation,
722                                         ref_generation, owner_objectid);
723         return ret;
724 }
725 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
726                                   struct btrfs_root *root, u64 bytenr,
727                                   u64 num_bytes,
728                                   u64 orig_parent, u64 parent,
729                                   u64 orig_root, u64 ref_root,
730                                   u64 orig_generation, u64 ref_generation,
731                                   u64 owner_objectid)
732 {
733         int ret;
734
735         ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent, ref_root,
736                                     ref_generation, owner_objectid,
737                                     BTRFS_ADD_DELAYED_REF, 0);
738         BUG_ON(ret);
739         return ret;
740 }
741
742 static noinline_for_stack int add_extent_ref(struct btrfs_trans_handle *trans,
743                           struct btrfs_root *root, u64 bytenr,
744                           u64 num_bytes, u64 parent, u64 ref_root,
745                           u64 ref_generation, u64 owner_objectid,
746                           int refs_to_add)
747 {
748         struct btrfs_path *path;
749         int ret;
750         struct btrfs_key key;
751         struct extent_buffer *l;
752         struct btrfs_extent_item *item;
753         u32 refs;
754
755         path = btrfs_alloc_path();
756         if (!path)
757                 return -ENOMEM;
758
759         path->reada = 1;
760         path->leave_spinning = 1;
761         key.objectid = bytenr;
762         key.type = BTRFS_EXTENT_ITEM_KEY;
763         key.offset = num_bytes;
764
765         /* first find the extent item and update its reference count */
766         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
767                                 path, 0, 1);
768         if (ret < 0) {
769                 btrfs_set_path_blocking(path);
770                 return ret;
771         }
772
773         if (ret > 0) {
774                 WARN_ON(1);
775                 btrfs_free_path(path);
776                 return -EIO;
777         }
778         l = path->nodes[0];
779
780         btrfs_item_key_to_cpu(l, &key, path->slots[0]);
781         if (key.objectid != bytenr) {
782                 btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
783                 printk(KERN_ERR "btrfs wanted %llu found %llu\n",
784                        (unsigned long long)bytenr,
785                        (unsigned long long)key.objectid);
786                 BUG();
787         }
788         BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
789
790         item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
791
792         refs = btrfs_extent_refs(l, item);
793         btrfs_set_extent_refs(l, item, refs + refs_to_add);
794         btrfs_unlock_up_safe(path, 1);
795
796         btrfs_mark_buffer_dirty(path->nodes[0]);
797
798         btrfs_release_path(root->fs_info->extent_root, path);
799
800         path->reada = 1;
801         path->leave_spinning = 1;
802
803         /* now insert the actual backref */
804         ret = insert_extent_backref(trans, root->fs_info->extent_root,
805                                     path, bytenr, parent,
806                                     ref_root, ref_generation,
807                                     owner_objectid, refs_to_add);
808         BUG_ON(ret);
809         btrfs_free_path(path);
810         return 0;
811 }
812
813 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
814                          struct btrfs_root *root,
815                          u64 bytenr, u64 num_bytes, u64 parent,
816                          u64 ref_root, u64 ref_generation,
817                          u64 owner_objectid)
818 {
819         int ret;
820         if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
821             owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
822                 return 0;
823
824         ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, parent,
825                                      0, ref_root, 0, ref_generation,
826                                      owner_objectid);
827         return ret;
828 }
829
830 static int drop_delayed_ref(struct btrfs_trans_handle *trans,
831                                         struct btrfs_root *root,
832                                         struct btrfs_delayed_ref_node *node)
833 {
834         int ret = 0;
835         struct btrfs_delayed_ref *ref = btrfs_delayed_node_to_ref(node);
836
837         BUG_ON(node->ref_mod == 0);
838         ret = __btrfs_free_extent(trans, root, node->bytenr, node->num_bytes,
839                                   node->parent, ref->root, ref->generation,
840                                   ref->owner_objectid, ref->pin, node->ref_mod);
841
842         return ret;
843 }
844
845 /* helper function to actually process a single delayed ref entry */
846 static noinline int run_one_delayed_ref(struct btrfs_trans_handle *trans,
847                                         struct btrfs_root *root,
848                                         struct btrfs_delayed_ref_node *node,
849                                         int insert_reserved)
850 {
851         int ret;
852         struct btrfs_delayed_ref *ref;
853
854         if (node->parent == (u64)-1) {
855                 struct btrfs_delayed_ref_head *head;
856                 /*
857                  * we've hit the end of the chain and we were supposed
858                  * to insert this extent into the tree.  But, it got
859                  * deleted before we ever needed to insert it, so all
860                  * we have to do is clean up the accounting
861                  */
862                 if (insert_reserved) {
863                         update_reserved_extents(root, node->bytenr,
864                                                 node->num_bytes, 0);
865                 }
866                 head = btrfs_delayed_node_to_head(node);
867                 mutex_unlock(&head->mutex);
868                 return 0;
869         }
870
871         ref = btrfs_delayed_node_to_ref(node);
872         if (ref->action == BTRFS_ADD_DELAYED_REF) {
873                 if (insert_reserved) {
874                         struct btrfs_key ins;
875
876                         ins.objectid = node->bytenr;
877                         ins.offset = node->num_bytes;
878                         ins.type = BTRFS_EXTENT_ITEM_KEY;
879
880                         /* record the full extent allocation */
881                         ret = __btrfs_alloc_reserved_extent(trans, root,
882                                         node->parent, ref->root,
883                                         ref->generation, ref->owner_objectid,
884                                         &ins, node->ref_mod);
885                         update_reserved_extents(root, node->bytenr,
886                                                 node->num_bytes, 0);
887                 } else {
888                         /* just add one backref */
889                         ret = add_extent_ref(trans, root, node->bytenr,
890                                      node->num_bytes,
891                                      node->parent, ref->root, ref->generation,
892                                      ref->owner_objectid, node->ref_mod);
893                 }
894                 BUG_ON(ret);
895         } else if (ref->action == BTRFS_DROP_DELAYED_REF) {
896                 WARN_ON(insert_reserved);
897                 ret = drop_delayed_ref(trans, root, node);
898         }
899         return 0;
900 }
901
902 static noinline struct btrfs_delayed_ref_node *
903 select_delayed_ref(struct btrfs_delayed_ref_head *head)
904 {
905         struct rb_node *node;
906         struct btrfs_delayed_ref_node *ref;
907         int action = BTRFS_ADD_DELAYED_REF;
908 again:
909         /*
910          * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
911          * this prevents ref count from going down to zero when
912          * there still are pending delayed ref.
913          */
914         node = rb_prev(&head->node.rb_node);
915         while (1) {
916                 if (!node)
917                         break;
918                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
919                                 rb_node);
920                 if (ref->bytenr != head->node.bytenr)
921                         break;
922                 if (btrfs_delayed_node_to_ref(ref)->action == action)
923                         return ref;
924                 node = rb_prev(node);
925         }
926         if (action == BTRFS_ADD_DELAYED_REF) {
927                 action = BTRFS_DROP_DELAYED_REF;
928                 goto again;
929         }
930         return NULL;
931 }
932
933 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
934                                        struct btrfs_root *root,
935                                        struct list_head *cluster)
936 {
937         struct btrfs_delayed_ref_root *delayed_refs;
938         struct btrfs_delayed_ref_node *ref;
939         struct btrfs_delayed_ref_head *locked_ref = NULL;
940         int ret;
941         int count = 0;
942         int must_insert_reserved = 0;
943
944         delayed_refs = &trans->transaction->delayed_refs;
945         while (1) {
946                 if (!locked_ref) {
947                         /* pick a new head ref from the cluster list */
948                         if (list_empty(cluster))
949                                 break;
950
951                         locked_ref = list_entry(cluster->next,
952                                      struct btrfs_delayed_ref_head, cluster);
953
954                         /* grab the lock that says we are going to process
955                          * all the refs for this head */
956                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
957
958                         /*
959                          * we may have dropped the spin lock to get the head
960                          * mutex lock, and that might have given someone else
961                          * time to free the head.  If that's true, it has been
962                          * removed from our list and we can move on.
963                          */
964                         if (ret == -EAGAIN) {
965                                 locked_ref = NULL;
966                                 count++;
967                                 continue;
968                         }
969                 }
970
971                 /*
972                  * record the must insert reserved flag before we
973                  * drop the spin lock.
974                  */
975                 must_insert_reserved = locked_ref->must_insert_reserved;
976                 locked_ref->must_insert_reserved = 0;
977
978                 /*
979                  * locked_ref is the head node, so we have to go one
980                  * node back for any delayed ref updates
981                  */
982                 ref = select_delayed_ref(locked_ref);
983                 if (!ref) {
984                         /* All delayed refs have been processed, Go ahead
985                          * and send the head node to run_one_delayed_ref,
986                          * so that any accounting fixes can happen
987                          */
988                         ref = &locked_ref->node;
989                         list_del_init(&locked_ref->cluster);
990                         locked_ref = NULL;
991                 }
992
993                 ref->in_tree = 0;
994                 rb_erase(&ref->rb_node, &delayed_refs->root);
995                 delayed_refs->num_entries--;
996                 spin_unlock(&delayed_refs->lock);
997
998                 ret = run_one_delayed_ref(trans, root, ref,
999                                           must_insert_reserved);
1000                 BUG_ON(ret);
1001                 btrfs_put_delayed_ref(ref);
1002
1003                 count++;
1004                 cond_resched();
1005                 spin_lock(&delayed_refs->lock);
1006         }
1007         return count;
1008 }
1009
1010 /*
1011  * this starts processing the delayed reference count updates and
1012  * extent insertions we have queued up so far.  count can be
1013  * 0, which means to process everything in the tree at the start
1014  * of the run (but not newly added entries), or it can be some target
1015  * number you'd like to process.
1016  */
1017 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
1018                            struct btrfs_root *root, unsigned long count)
1019 {
1020         struct rb_node *node;
1021         struct btrfs_delayed_ref_root *delayed_refs;
1022         struct btrfs_delayed_ref_node *ref;
1023         struct list_head cluster;
1024         int ret;
1025         int run_all = count == (unsigned long)-1;
1026         int run_most = 0;
1027
1028         if (root == root->fs_info->extent_root)
1029                 root = root->fs_info->tree_root;
1030
1031         delayed_refs = &trans->transaction->delayed_refs;
1032         INIT_LIST_HEAD(&cluster);
1033 again:
1034         spin_lock(&delayed_refs->lock);
1035         if (count == 0) {
1036                 count = delayed_refs->num_entries * 2;
1037                 run_most = 1;
1038         }
1039         while (1) {
1040                 if (!(run_all || run_most) &&
1041                     delayed_refs->num_heads_ready < 64)
1042                         break;
1043
1044                 /*
1045                  * go find something we can process in the rbtree.  We start at
1046                  * the beginning of the tree, and then build a cluster
1047                  * of refs to process starting at the first one we are able to
1048                  * lock
1049                  */
1050                 ret = btrfs_find_ref_cluster(trans, &cluster,
1051                                              delayed_refs->run_delayed_start);
1052                 if (ret)
1053                         break;
1054
1055                 ret = run_clustered_refs(trans, root, &cluster);
1056                 BUG_ON(ret < 0);
1057
1058                 count -= min_t(unsigned long, ret, count);
1059
1060                 if (count == 0)
1061                         break;
1062         }
1063
1064         if (run_all) {
1065                 node = rb_first(&delayed_refs->root);
1066                 if (!node)
1067                         goto out;
1068                 count = (unsigned long)-1;
1069
1070                 while (node) {
1071                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
1072                                        rb_node);
1073                         if (btrfs_delayed_ref_is_head(ref)) {
1074                                 struct btrfs_delayed_ref_head *head;
1075
1076                                 head = btrfs_delayed_node_to_head(ref);
1077                                 atomic_inc(&ref->refs);
1078
1079                                 spin_unlock(&delayed_refs->lock);
1080                                 mutex_lock(&head->mutex);
1081                                 mutex_unlock(&head->mutex);
1082
1083                                 btrfs_put_delayed_ref(ref);
1084                                 cond_resched();
1085                                 goto again;
1086                         }
1087                         node = rb_next(node);
1088                 }
1089                 spin_unlock(&delayed_refs->lock);
1090                 schedule_timeout(1);
1091                 goto again;
1092         }
1093 out:
1094         spin_unlock(&delayed_refs->lock);
1095         return 0;
1096 }
1097
1098 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
1099                           struct btrfs_root *root, u64 objectid, u64 bytenr)
1100 {
1101         struct btrfs_root *extent_root = root->fs_info->extent_root;
1102         struct btrfs_path *path;
1103         struct extent_buffer *leaf;
1104         struct btrfs_extent_ref *ref_item;
1105         struct btrfs_key key;
1106         struct btrfs_key found_key;
1107         u64 ref_root;
1108         u64 last_snapshot;
1109         u32 nritems;
1110         int ret;
1111
1112         key.objectid = bytenr;
1113         key.offset = (u64)-1;
1114         key.type = BTRFS_EXTENT_ITEM_KEY;
1115
1116         path = btrfs_alloc_path();
1117         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1118         if (ret < 0)
1119                 goto out;
1120         BUG_ON(ret == 0);
1121
1122         ret = -ENOENT;
1123         if (path->slots[0] == 0)
1124                 goto out;
1125
1126         path->slots[0]--;
1127         leaf = path->nodes[0];
1128         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1129
1130         if (found_key.objectid != bytenr ||
1131             found_key.type != BTRFS_EXTENT_ITEM_KEY)
1132                 goto out;
1133
1134         last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1135         while (1) {
1136                 leaf = path->nodes[0];
1137                 nritems = btrfs_header_nritems(leaf);
1138                 if (path->slots[0] >= nritems) {
1139                         ret = btrfs_next_leaf(extent_root, path);
1140                         if (ret < 0)
1141                                 goto out;
1142                         if (ret == 0)
1143                                 continue;
1144                         break;
1145                 }
1146                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1147                 if (found_key.objectid != bytenr)
1148                         break;
1149
1150                 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
1151                         path->slots[0]++;
1152                         continue;
1153                 }
1154
1155                 ref_item = btrfs_item_ptr(leaf, path->slots[0],
1156                                           struct btrfs_extent_ref);
1157                 ref_root = btrfs_ref_root(leaf, ref_item);
1158                 if ((ref_root != root->root_key.objectid &&
1159                      ref_root != BTRFS_TREE_LOG_OBJECTID) ||
1160                      objectid != btrfs_ref_objectid(leaf, ref_item)) {
1161                         ret = 1;
1162                         goto out;
1163                 }
1164                 if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
1165                         ret = 1;
1166                         goto out;
1167                 }
1168
1169                 path->slots[0]++;
1170         }
1171         ret = 0;
1172 out:
1173         btrfs_free_path(path);
1174         return ret;
1175 }
1176
1177 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1178                     struct extent_buffer *buf, u32 nr_extents)
1179 {
1180         struct btrfs_key key;
1181         struct btrfs_file_extent_item *fi;
1182         u64 root_gen;
1183         u32 nritems;
1184         int i;
1185         int level;
1186         int ret = 0;
1187         int shared = 0;
1188
1189         if (!root->ref_cows)
1190                 return 0;
1191
1192         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1193                 shared = 0;
1194                 root_gen = root->root_key.offset;
1195         } else {
1196                 shared = 1;
1197                 root_gen = trans->transid - 1;
1198         }
1199
1200         level = btrfs_header_level(buf);
1201         nritems = btrfs_header_nritems(buf);
1202
1203         if (level == 0) {
1204                 struct btrfs_leaf_ref *ref;
1205                 struct btrfs_extent_info *info;
1206
1207                 ref = btrfs_alloc_leaf_ref(root, nr_extents);
1208                 if (!ref) {
1209                         ret = -ENOMEM;
1210                         goto out;
1211                 }
1212
1213                 ref->root_gen = root_gen;
1214                 ref->bytenr = buf->start;
1215                 ref->owner = btrfs_header_owner(buf);
1216                 ref->generation = btrfs_header_generation(buf);
1217                 ref->nritems = nr_extents;
1218                 info = ref->extents;
1219
1220                 for (i = 0; nr_extents > 0 && i < nritems; i++) {
1221                         u64 disk_bytenr;
1222                         btrfs_item_key_to_cpu(buf, &key, i);
1223                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1224                                 continue;
1225                         fi = btrfs_item_ptr(buf, i,
1226                                             struct btrfs_file_extent_item);
1227                         if (btrfs_file_extent_type(buf, fi) ==
1228                             BTRFS_FILE_EXTENT_INLINE)
1229                                 continue;
1230                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1231                         if (disk_bytenr == 0)
1232                                 continue;
1233
1234                         info->bytenr = disk_bytenr;
1235                         info->num_bytes =
1236                                 btrfs_file_extent_disk_num_bytes(buf, fi);
1237                         info->objectid = key.objectid;
1238                         info->offset = key.offset;
1239                         info++;
1240                 }
1241
1242                 ret = btrfs_add_leaf_ref(root, ref, shared);
1243                 if (ret == -EEXIST && shared) {
1244                         struct btrfs_leaf_ref *old;
1245                         old = btrfs_lookup_leaf_ref(root, ref->bytenr);
1246                         BUG_ON(!old);
1247                         btrfs_remove_leaf_ref(root, old);
1248                         btrfs_free_leaf_ref(root, old);
1249                         ret = btrfs_add_leaf_ref(root, ref, shared);
1250                 }
1251                 WARN_ON(ret);
1252                 btrfs_free_leaf_ref(root, ref);
1253         }
1254 out:
1255         return ret;
1256 }
1257
1258 /* when a block goes through cow, we update the reference counts of
1259  * everything that block points to.  The internal pointers of the block
1260  * can be in just about any order, and it is likely to have clusters of
1261  * things that are close together and clusters of things that are not.
1262  *
1263  * To help reduce the seeks that come with updating all of these reference
1264  * counts, sort them by byte number before actual updates are done.
1265  *
1266  * struct refsort is used to match byte number to slot in the btree block.
1267  * we sort based on the byte number and then use the slot to actually
1268  * find the item.
1269  *
1270  * struct refsort is smaller than strcut btrfs_item and smaller than
1271  * struct btrfs_key_ptr.  Since we're currently limited to the page size
1272  * for a btree block, there's no way for a kmalloc of refsorts for a
1273  * single node to be bigger than a page.
1274  */
1275 struct refsort {
1276         u64 bytenr;
1277         u32 slot;
1278 };
1279
1280 /*
1281  * for passing into sort()
1282  */
1283 static int refsort_cmp(const void *a_void, const void *b_void)
1284 {
1285         const struct refsort *a = a_void;
1286         const struct refsort *b = b_void;
1287
1288         if (a->bytenr < b->bytenr)
1289                 return -1;
1290         if (a->bytenr > b->bytenr)
1291                 return 1;
1292         return 0;
1293 }
1294
1295
1296 noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
1297                            struct btrfs_root *root,
1298                            struct extent_buffer *orig_buf,
1299                            struct extent_buffer *buf, u32 *nr_extents)
1300 {
1301         u64 bytenr;
1302         u64 ref_root;
1303         u64 orig_root;
1304         u64 ref_generation;
1305         u64 orig_generation;
1306         struct refsort *sorted;
1307         u32 nritems;
1308         u32 nr_file_extents = 0;
1309         struct btrfs_key key;
1310         struct btrfs_file_extent_item *fi;
1311         int i;
1312         int level;
1313         int ret = 0;
1314         int faili = 0;
1315         int refi = 0;
1316         int slot;
1317         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1318                             u64, u64, u64, u64, u64, u64, u64, u64, u64);
1319
1320         ref_root = btrfs_header_owner(buf);
1321         ref_generation = btrfs_header_generation(buf);
1322         orig_root = btrfs_header_owner(orig_buf);
1323         orig_generation = btrfs_header_generation(orig_buf);
1324
1325         nritems = btrfs_header_nritems(buf);
1326         level = btrfs_header_level(buf);
1327
1328         sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
1329         BUG_ON(!sorted);
1330
1331         if (root->ref_cows) {
1332                 process_func = __btrfs_inc_extent_ref;
1333         } else {
1334                 if (level == 0 &&
1335                     root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1336                         goto out;
1337                 if (level != 0 &&
1338                     root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1339                         goto out;
1340                 process_func = __btrfs_update_extent_ref;
1341         }
1342
1343         /*
1344          * we make two passes through the items.  In the first pass we
1345          * only record the byte number and slot.  Then we sort based on
1346          * byte number and do the actual work based on the sorted results
1347          */
1348         for (i = 0; i < nritems; i++) {
1349                 cond_resched();
1350                 if (level == 0) {
1351                         btrfs_item_key_to_cpu(buf, &key, i);
1352                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1353                                 continue;
1354                         fi = btrfs_item_ptr(buf, i,
1355                                             struct btrfs_file_extent_item);
1356                         if (btrfs_file_extent_type(buf, fi) ==
1357                             BTRFS_FILE_EXTENT_INLINE)
1358                                 continue;
1359                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1360                         if (bytenr == 0)
1361                                 continue;
1362
1363                         nr_file_extents++;
1364                         sorted[refi].bytenr = bytenr;
1365                         sorted[refi].slot = i;
1366                         refi++;
1367                 } else {
1368                         bytenr = btrfs_node_blockptr(buf, i);
1369                         sorted[refi].bytenr = bytenr;
1370                         sorted[refi].slot = i;
1371                         refi++;
1372                 }
1373         }
1374         /*
1375          * if refi == 0, we didn't actually put anything into the sorted
1376          * array and we're done
1377          */
1378         if (refi == 0)
1379                 goto out;
1380
1381         sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
1382
1383         for (i = 0; i < refi; i++) {
1384                 cond_resched();
1385                 slot = sorted[i].slot;
1386                 bytenr = sorted[i].bytenr;
1387
1388                 if (level == 0) {
1389                         btrfs_item_key_to_cpu(buf, &key, slot);
1390                         fi = btrfs_item_ptr(buf, slot,
1391                                             struct btrfs_file_extent_item);
1392
1393                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1394                         if (bytenr == 0)
1395                                 continue;
1396
1397                         ret = process_func(trans, root, bytenr,
1398                                    btrfs_file_extent_disk_num_bytes(buf, fi),
1399                                    orig_buf->start, buf->start,
1400                                    orig_root, ref_root,
1401                                    orig_generation, ref_generation,
1402                                    key.objectid);
1403
1404                         if (ret) {
1405                                 faili = slot;
1406                                 WARN_ON(1);
1407                                 goto fail;
1408                         }
1409                 } else {
1410                         ret = process_func(trans, root, bytenr, buf->len,
1411                                            orig_buf->start, buf->start,
1412                                            orig_root, ref_root,
1413                                            orig_generation, ref_generation,
1414                                            level - 1);
1415                         if (ret) {
1416                                 faili = slot;
1417                                 WARN_ON(1);
1418                                 goto fail;
1419                         }
1420                 }
1421         }
1422 out:
1423         kfree(sorted);
1424         if (nr_extents) {
1425                 if (level == 0)
1426                         *nr_extents = nr_file_extents;
1427                 else
1428                         *nr_extents = nritems;
1429         }
1430         return 0;
1431 fail:
1432         kfree(sorted);
1433         WARN_ON(1);
1434         return ret;
1435 }
1436
1437 int btrfs_update_ref(struct btrfs_trans_handle *trans,
1438                      struct btrfs_root *root, struct extent_buffer *orig_buf,
1439                      struct extent_buffer *buf, int start_slot, int nr)
1440
1441 {
1442         u64 bytenr;
1443         u64 ref_root;
1444         u64 orig_root;
1445         u64 ref_generation;
1446         u64 orig_generation;
1447         struct btrfs_key key;
1448         struct btrfs_file_extent_item *fi;
1449         int i;
1450         int ret;
1451         int slot;
1452         int level;
1453
1454         BUG_ON(start_slot < 0);
1455         BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
1456
1457         ref_root = btrfs_header_owner(buf);
1458         ref_generation = btrfs_header_generation(buf);
1459         orig_root = btrfs_header_owner(orig_buf);
1460         orig_generation = btrfs_header_generation(orig_buf);
1461         level = btrfs_header_level(buf);
1462
1463         if (!root->ref_cows) {
1464                 if (level == 0 &&
1465                     root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1466                         return 0;
1467                 if (level != 0 &&
1468                     root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1469                         return 0;
1470         }
1471
1472         for (i = 0, slot = start_slot; i < nr; i++, slot++) {
1473                 cond_resched();
1474                 if (level == 0) {
1475                         btrfs_item_key_to_cpu(buf, &key, slot);
1476                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1477                                 continue;
1478                         fi = btrfs_item_ptr(buf, slot,
1479                                             struct btrfs_file_extent_item);
1480                         if (btrfs_file_extent_type(buf, fi) ==
1481                             BTRFS_FILE_EXTENT_INLINE)
1482                                 continue;
1483                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1484                         if (bytenr == 0)
1485                                 continue;
1486                         ret = __btrfs_update_extent_ref(trans, root, bytenr,
1487                                     btrfs_file_extent_disk_num_bytes(buf, fi),
1488                                     orig_buf->start, buf->start,
1489                                     orig_root, ref_root, orig_generation,
1490                                     ref_generation, key.objectid);
1491                         if (ret)
1492                                 goto fail;
1493                 } else {
1494                         bytenr = btrfs_node_blockptr(buf, slot);
1495                         ret = __btrfs_update_extent_ref(trans, root, bytenr,
1496                                             buf->len, orig_buf->start,
1497                                             buf->start, orig_root, ref_root,
1498                                             orig_generation, ref_generation,
1499                                             level - 1);
1500                         if (ret)
1501                                 goto fail;
1502                 }
1503         }
1504         return 0;
1505 fail:
1506         WARN_ON(1);
1507         return -1;
1508 }
1509
1510 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1511                                  struct btrfs_root *root,
1512                                  struct btrfs_path *path,
1513                                  struct btrfs_block_group_cache *cache)
1514 {
1515         int ret;
1516         struct btrfs_root *extent_root = root->fs_info->extent_root;
1517         unsigned long bi;
1518         struct extent_buffer *leaf;
1519
1520         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1521         if (ret < 0)
1522                 goto fail;
1523         BUG_ON(ret);
1524
1525         leaf = path->nodes[0];
1526         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1527         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1528         btrfs_mark_buffer_dirty(leaf);
1529         btrfs_release_path(extent_root, path);
1530 fail:
1531         if (ret)
1532                 return ret;
1533         return 0;
1534
1535 }
1536
1537 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1538                                    struct btrfs_root *root)
1539 {
1540         struct btrfs_block_group_cache *cache, *entry;
1541         struct rb_node *n;
1542         int err = 0;
1543         int werr = 0;
1544         struct btrfs_path *path;
1545         u64 last = 0;
1546
1547         path = btrfs_alloc_path();
1548         if (!path)
1549                 return -ENOMEM;
1550
1551         while (1) {
1552                 cache = NULL;
1553                 spin_lock(&root->fs_info->block_group_cache_lock);
1554                 for (n = rb_first(&root->fs_info->block_group_cache_tree);
1555                      n; n = rb_next(n)) {
1556                         entry = rb_entry(n, struct btrfs_block_group_cache,
1557                                          cache_node);
1558                         if (entry->dirty) {
1559                                 cache = entry;
1560                                 break;
1561                         }
1562                 }
1563                 spin_unlock(&root->fs_info->block_group_cache_lock);
1564
1565                 if (!cache)
1566                         break;
1567
1568                 cache->dirty = 0;
1569                 last += cache->key.offset;
1570
1571                 err = write_one_cache_group(trans, root,
1572                                             path, cache);
1573                 /*
1574                  * if we fail to write the cache group, we want
1575                  * to keep it marked dirty in hopes that a later
1576                  * write will work
1577                  */
1578                 if (err) {
1579                         werr = err;
1580                         continue;
1581                 }
1582         }
1583         btrfs_free_path(path);
1584         return werr;
1585 }
1586
1587 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
1588 {
1589         struct btrfs_block_group_cache *block_group;
1590         int readonly = 0;
1591
1592         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1593         if (!block_group || block_group->ro)
1594                 readonly = 1;
1595         if (block_group)
1596                 btrfs_put_block_group(block_group);
1597         return readonly;
1598 }
1599
1600 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1601                              u64 total_bytes, u64 bytes_used,
1602                              struct btrfs_space_info **space_info)
1603 {
1604         struct btrfs_space_info *found;
1605
1606         found = __find_space_info(info, flags);
1607         if (found) {
1608                 spin_lock(&found->lock);
1609                 found->total_bytes += total_bytes;
1610                 found->bytes_used += bytes_used;
1611                 found->full = 0;
1612                 spin_unlock(&found->lock);
1613                 *space_info = found;
1614                 return 0;
1615         }
1616         found = kzalloc(sizeof(*found), GFP_NOFS);
1617         if (!found)
1618                 return -ENOMEM;
1619
1620         INIT_LIST_HEAD(&found->block_groups);
1621         init_rwsem(&found->groups_sem);
1622         spin_lock_init(&found->lock);
1623         found->flags = flags;
1624         found->total_bytes = total_bytes;
1625         found->bytes_used = bytes_used;
1626         found->bytes_pinned = 0;
1627         found->bytes_reserved = 0;
1628         found->bytes_readonly = 0;
1629         found->bytes_delalloc = 0;
1630         found->full = 0;
1631         found->force_alloc = 0;
1632         *space_info = found;
1633         list_add_rcu(&found->list, &info->space_info);
1634         return 0;
1635 }
1636
1637 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1638 {
1639         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1640                                    BTRFS_BLOCK_GROUP_RAID1 |
1641                                    BTRFS_BLOCK_GROUP_RAID10 |
1642                                    BTRFS_BLOCK_GROUP_DUP);
1643         if (extra_flags) {
1644                 if (flags & BTRFS_BLOCK_GROUP_DATA)
1645                         fs_info->avail_data_alloc_bits |= extra_flags;
1646                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1647                         fs_info->avail_metadata_alloc_bits |= extra_flags;
1648                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1649                         fs_info->avail_system_alloc_bits |= extra_flags;
1650         }
1651 }
1652
1653 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
1654 {
1655         spin_lock(&cache->space_info->lock);
1656         spin_lock(&cache->lock);
1657         if (!cache->ro) {
1658                 cache->space_info->bytes_readonly += cache->key.offset -
1659                                         btrfs_block_group_used(&cache->item);
1660                 cache->ro = 1;
1661         }
1662         spin_unlock(&cache->lock);
1663         spin_unlock(&cache->space_info->lock);
1664 }
1665
1666 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1667 {
1668         u64 num_devices = root->fs_info->fs_devices->rw_devices;
1669
1670         if (num_devices == 1)
1671                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1672         if (num_devices < 4)
1673                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1674
1675         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1676             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1677                       BTRFS_BLOCK_GROUP_RAID10))) {
1678                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1679         }
1680
1681         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1682             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1683                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1684         }
1685
1686         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1687             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1688              (flags & BTRFS_BLOCK_GROUP_RAID10) |
1689              (flags & BTRFS_BLOCK_GROUP_DUP)))
1690                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1691         return flags;
1692 }
1693
1694 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
1695 {
1696         struct btrfs_fs_info *info = root->fs_info;
1697         u64 alloc_profile;
1698
1699         if (data) {
1700                 alloc_profile = info->avail_data_alloc_bits &
1701                         info->data_alloc_profile;
1702                 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
1703         } else if (root == root->fs_info->chunk_root) {
1704                 alloc_profile = info->avail_system_alloc_bits &
1705                         info->system_alloc_profile;
1706                 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
1707         } else {
1708                 alloc_profile = info->avail_metadata_alloc_bits &
1709                         info->metadata_alloc_profile;
1710                 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
1711         }
1712
1713         return btrfs_reduce_alloc_profile(root, data);
1714 }
1715
1716 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
1717 {
1718         u64 alloc_target;
1719
1720         alloc_target = btrfs_get_alloc_profile(root, 1);
1721         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
1722                                                        alloc_target);
1723 }
1724
1725 /*
1726  * for now this just makes sure we have at least 5% of our metadata space free
1727  * for use.
1728  */
1729 int btrfs_check_metadata_free_space(struct btrfs_root *root)
1730 {
1731         struct btrfs_fs_info *info = root->fs_info;
1732         struct btrfs_space_info *meta_sinfo;
1733         u64 alloc_target, thresh;
1734         int committed = 0, ret;
1735
1736         /* get the space info for where the metadata will live */
1737         alloc_target = btrfs_get_alloc_profile(root, 0);
1738         meta_sinfo = __find_space_info(info, alloc_target);
1739
1740 again:
1741         spin_lock(&meta_sinfo->lock);
1742         if (!meta_sinfo->full)
1743                 thresh = meta_sinfo->total_bytes * 80;
1744         else
1745                 thresh = meta_sinfo->total_bytes * 95;
1746
1747         do_div(thresh, 100);
1748
1749         if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
1750             meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
1751                 struct btrfs_trans_handle *trans;
1752                 if (!meta_sinfo->full) {
1753                         meta_sinfo->force_alloc = 1;
1754                         spin_unlock(&meta_sinfo->lock);
1755
1756                         trans = btrfs_start_transaction(root, 1);
1757                         if (!trans)
1758                                 return -ENOMEM;
1759
1760                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
1761                                              2 * 1024 * 1024, alloc_target, 0);
1762                         btrfs_end_transaction(trans, root);
1763                         goto again;
1764                 }
1765                 spin_unlock(&meta_sinfo->lock);
1766
1767                 if (!committed) {
1768                         committed = 1;
1769                         trans = btrfs_join_transaction(root, 1);
1770                         if (!trans)
1771                                 return -ENOMEM;
1772                         ret = btrfs_commit_transaction(trans, root);
1773                         if (ret)
1774                                 return ret;
1775                         goto again;
1776                 }
1777                 return -ENOSPC;
1778         }
1779         spin_unlock(&meta_sinfo->lock);
1780
1781         return 0;
1782 }
1783
1784 /*
1785  * This will check the space that the inode allocates from to make sure we have
1786  * enough space for bytes.
1787  */
1788 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
1789                                 u64 bytes)
1790 {
1791         struct btrfs_space_info *data_sinfo;
1792         int ret = 0, committed = 0;
1793
1794         /* make sure bytes are sectorsize aligned */
1795         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
1796
1797         data_sinfo = BTRFS_I(inode)->space_info;
1798 again:
1799         /* make sure we have enough space to handle the data first */
1800         spin_lock(&data_sinfo->lock);
1801         if (data_sinfo->total_bytes - data_sinfo->bytes_used -
1802             data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
1803             data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
1804             data_sinfo->bytes_may_use < bytes) {
1805                 struct btrfs_trans_handle *trans;
1806
1807                 /*
1808                  * if we don't have enough free bytes in this space then we need
1809                  * to alloc a new chunk.
1810                  */
1811                 if (!data_sinfo->full) {
1812                         u64 alloc_target;
1813
1814                         data_sinfo->force_alloc = 1;
1815                         spin_unlock(&data_sinfo->lock);
1816
1817                         alloc_target = btrfs_get_alloc_profile(root, 1);
1818                         trans = btrfs_start_transaction(root, 1);
1819                         if (!trans)
1820                                 return -ENOMEM;
1821
1822                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
1823                                              bytes + 2 * 1024 * 1024,
1824                                              alloc_target, 0);
1825                         btrfs_end_transaction(trans, root);
1826                         if (ret)
1827                                 return ret;
1828                         goto again;
1829                 }
1830                 spin_unlock(&data_sinfo->lock);
1831
1832                 /* commit the current transaction and try again */
1833                 if (!committed) {
1834                         committed = 1;
1835                         trans = btrfs_join_transaction(root, 1);
1836                         if (!trans)
1837                                 return -ENOMEM;
1838                         ret = btrfs_commit_transaction(trans, root);
1839                         if (ret)
1840                                 return ret;
1841                         goto again;
1842                 }
1843
1844                 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
1845                        ", %llu bytes_used, %llu bytes_reserved, "
1846                        "%llu bytes_pinned, %llu bytes_readonly, %llu may use"
1847                        "%llu total\n", bytes, data_sinfo->bytes_delalloc,
1848                        data_sinfo->bytes_used, data_sinfo->bytes_reserved,
1849                        data_sinfo->bytes_pinned, data_sinfo->bytes_readonly,
1850                        data_sinfo->bytes_may_use, data_sinfo->total_bytes);
1851                 return -ENOSPC;
1852         }
1853         data_sinfo->bytes_may_use += bytes;
1854         BTRFS_I(inode)->reserved_bytes += bytes;
1855         spin_unlock(&data_sinfo->lock);
1856
1857         return btrfs_check_metadata_free_space(root);
1858 }
1859
1860 /*
1861  * if there was an error for whatever reason after calling
1862  * btrfs_check_data_free_space, call this so we can cleanup the counters.
1863  */
1864 void btrfs_free_reserved_data_space(struct btrfs_root *root,
1865                                     struct inode *inode, u64 bytes)
1866 {
1867         struct btrfs_space_info *data_sinfo;
1868
1869         /* make sure bytes are sectorsize aligned */
1870         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
1871
1872         data_sinfo = BTRFS_I(inode)->space_info;
1873         spin_lock(&data_sinfo->lock);
1874         data_sinfo->bytes_may_use -= bytes;
1875         BTRFS_I(inode)->reserved_bytes -= bytes;
1876         spin_unlock(&data_sinfo->lock);
1877 }
1878
1879 /* called when we are adding a delalloc extent to the inode's io_tree */
1880 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
1881                                   u64 bytes)
1882 {
1883         struct btrfs_space_info *data_sinfo;
1884
1885         /* get the space info for where this inode will be storing its data */
1886         data_sinfo = BTRFS_I(inode)->space_info;
1887
1888         /* make sure we have enough space to handle the data first */
1889         spin_lock(&data_sinfo->lock);
1890         data_sinfo->bytes_delalloc += bytes;
1891
1892         /*
1893          * we are adding a delalloc extent without calling
1894          * btrfs_check_data_free_space first.  This happens on a weird
1895          * writepage condition, but shouldn't hurt our accounting
1896          */
1897         if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
1898                 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
1899                 BTRFS_I(inode)->reserved_bytes = 0;
1900         } else {
1901                 data_sinfo->bytes_may_use -= bytes;
1902                 BTRFS_I(inode)->reserved_bytes -= bytes;
1903         }
1904
1905         spin_unlock(&data_sinfo->lock);
1906 }
1907
1908 /* called when we are clearing an delalloc extent from the inode's io_tree */
1909 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
1910                               u64 bytes)
1911 {
1912         struct btrfs_space_info *info;
1913
1914         info = BTRFS_I(inode)->space_info;
1915
1916         spin_lock(&info->lock);
1917         info->bytes_delalloc -= bytes;
1918         spin_unlock(&info->lock);
1919 }
1920
1921 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1922                           struct btrfs_root *extent_root, u64 alloc_bytes,
1923                           u64 flags, int force)
1924 {
1925         struct btrfs_space_info *space_info;
1926         u64 thresh;
1927         int ret = 0;
1928
1929         mutex_lock(&extent_root->fs_info->chunk_mutex);
1930
1931         flags = btrfs_reduce_alloc_profile(extent_root, flags);
1932
1933         space_info = __find_space_info(extent_root->fs_info, flags);
1934         if (!space_info) {
1935                 ret = update_space_info(extent_root->fs_info, flags,
1936                                         0, 0, &space_info);
1937                 BUG_ON(ret);
1938         }
1939         BUG_ON(!space_info);
1940
1941         spin_lock(&space_info->lock);
1942         if (space_info->force_alloc) {
1943                 force = 1;
1944                 space_info->force_alloc = 0;
1945         }
1946         if (space_info->full) {
1947                 spin_unlock(&space_info->lock);
1948                 goto out;
1949         }
1950
1951         thresh = space_info->total_bytes - space_info->bytes_readonly;
1952         thresh = div_factor(thresh, 6);
1953         if (!force &&
1954            (space_info->bytes_used + space_info->bytes_pinned +
1955             space_info->bytes_reserved + alloc_bytes) < thresh) {
1956                 spin_unlock(&space_info->lock);
1957                 goto out;
1958         }
1959         spin_unlock(&space_info->lock);
1960
1961         ret = btrfs_alloc_chunk(trans, extent_root, flags);
1962         if (ret)
1963                 space_info->full = 1;
1964 out:
1965         mutex_unlock(&extent_root->fs_info->chunk_mutex);
1966         return ret;
1967 }
1968
1969 static int update_block_group(struct btrfs_trans_handle *trans,
1970                               struct btrfs_root *root,
1971                               u64 bytenr, u64 num_bytes, int alloc,
1972                               int mark_free)
1973 {
1974         struct btrfs_block_group_cache *cache;
1975         struct btrfs_fs_info *info = root->fs_info;
1976         u64 total = num_bytes;
1977         u64 old_val;
1978         u64 byte_in_group;
1979
1980         while (total) {
1981                 cache = btrfs_lookup_block_group(info, bytenr);
1982                 if (!cache)
1983                         return -1;
1984                 byte_in_group = bytenr - cache->key.objectid;
1985                 WARN_ON(byte_in_group > cache->key.offset);
1986
1987                 spin_lock(&cache->space_info->lock);
1988                 spin_lock(&cache->lock);
1989                 cache->dirty = 1;
1990                 old_val = btrfs_block_group_used(&cache->item);
1991                 num_bytes = min(total, cache->key.offset - byte_in_group);
1992                 if (alloc) {
1993                         old_val += num_bytes;
1994                         cache->space_info->bytes_used += num_bytes;
1995                         if (cache->ro)
1996                                 cache->space_info->bytes_readonly -= num_bytes;
1997                         btrfs_set_block_group_used(&cache->item, old_val);
1998                         spin_unlock(&cache->lock);
1999                         spin_unlock(&cache->space_info->lock);
2000                 } else {
2001                         old_val -= num_bytes;
2002                         cache->space_info->bytes_used -= num_bytes;
2003                         if (cache->ro)
2004                                 cache->space_info->bytes_readonly += num_bytes;
2005                         btrfs_set_block_group_used(&cache->item, old_val);
2006                         spin_unlock(&cache->lock);
2007                         spin_unlock(&cache->space_info->lock);
2008                         if (mark_free) {
2009                                 int ret;
2010
2011                                 ret = btrfs_discard_extent(root, bytenr,
2012                                                            num_bytes);
2013                                 WARN_ON(ret);
2014
2015                                 ret = btrfs_add_free_space(cache, bytenr,
2016                                                            num_bytes);
2017                                 WARN_ON(ret);
2018                         }
2019                 }
2020                 btrfs_put_block_group(cache);
2021                 total -= num_bytes;
2022                 bytenr += num_bytes;
2023         }
2024         return 0;
2025 }
2026
2027 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
2028 {
2029         struct btrfs_block_group_cache *cache;
2030         u64 bytenr;
2031
2032         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
2033         if (!cache)
2034                 return 0;
2035
2036         bytenr = cache->key.objectid;
2037         btrfs_put_block_group(cache);
2038
2039         return bytenr;
2040 }
2041
2042 int btrfs_update_pinned_extents(struct btrfs_root *root,
2043                                 u64 bytenr, u64 num, int pin)
2044 {
2045         u64 len;
2046         struct btrfs_block_group_cache *cache;
2047         struct btrfs_fs_info *fs_info = root->fs_info;
2048
2049         if (pin) {
2050                 set_extent_dirty(&fs_info->pinned_extents,
2051                                 bytenr, bytenr + num - 1, GFP_NOFS);
2052         } else {
2053                 clear_extent_dirty(&fs_info->pinned_extents,
2054                                 bytenr, bytenr + num - 1, GFP_NOFS);
2055         }
2056
2057         while (num > 0) {
2058                 cache = btrfs_lookup_block_group(fs_info, bytenr);
2059                 BUG_ON(!cache);
2060                 len = min(num, cache->key.offset -
2061                           (bytenr - cache->key.objectid));
2062                 if (pin) {
2063                         spin_lock(&cache->space_info->lock);
2064                         spin_lock(&cache->lock);
2065                         cache->pinned += len;
2066                         cache->space_info->bytes_pinned += len;
2067                         spin_unlock(&cache->lock);
2068                         spin_unlock(&cache->space_info->lock);
2069                         fs_info->total_pinned += len;
2070                 } else {
2071                         spin_lock(&cache->space_info->lock);
2072                         spin_lock(&cache->lock);
2073                         cache->pinned -= len;
2074                         cache->space_info->bytes_pinned -= len;
2075                         spin_unlock(&cache->lock);
2076                         spin_unlock(&cache->space_info->lock);
2077                         fs_info->total_pinned -= len;
2078                         if (cache->cached)
2079                                 btrfs_add_free_space(cache, bytenr, len);
2080                 }
2081                 btrfs_put_block_group(cache);
2082                 bytenr += len;
2083                 num -= len;
2084         }
2085         return 0;
2086 }
2087
2088 static int update_reserved_extents(struct btrfs_root *root,
2089                                    u64 bytenr, u64 num, int reserve)
2090 {
2091         u64 len;
2092         struct btrfs_block_group_cache *cache;
2093         struct btrfs_fs_info *fs_info = root->fs_info;
2094
2095         while (num > 0) {
2096                 cache = btrfs_lookup_block_group(fs_info, bytenr);
2097                 BUG_ON(!cache);
2098                 len = min(num, cache->key.offset -
2099                           (bytenr - cache->key.objectid));
2100
2101                 spin_lock(&cache->space_info->lock);
2102                 spin_lock(&cache->lock);
2103                 if (reserve) {
2104                         cache->reserved += len;
2105                         cache->space_info->bytes_reserved += len;
2106                 } else {
2107                         cache->reserved -= len;
2108                         cache->space_info->bytes_reserved -= len;
2109                 }
2110                 spin_unlock(&cache->lock);
2111                 spin_unlock(&cache->space_info->lock);
2112                 btrfs_put_block_group(cache);
2113                 bytenr += len;
2114                 num -= len;
2115         }
2116         return 0;
2117 }
2118
2119 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
2120 {
2121         u64 last = 0;
2122         u64 start;
2123         u64 end;
2124         struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
2125         int ret;
2126
2127         while (1) {
2128                 ret = find_first_extent_bit(pinned_extents, last,
2129                                             &start, &end, EXTENT_DIRTY);
2130                 if (ret)
2131                         break;
2132                 set_extent_dirty(copy, start, end, GFP_NOFS);
2133                 last = end + 1;
2134         }
2135         return 0;
2136 }
2137
2138 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2139                                struct btrfs_root *root,
2140                                struct extent_io_tree *unpin)
2141 {
2142         u64 start;
2143         u64 end;
2144         int ret;
2145
2146         while (1) {
2147                 ret = find_first_extent_bit(unpin, 0, &start, &end,
2148                                             EXTENT_DIRTY);
2149                 if (ret)
2150                         break;
2151
2152                 ret = btrfs_discard_extent(root, start, end + 1 - start);
2153
2154                 /* unlocks the pinned mutex */
2155                 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
2156                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
2157
2158                 cond_resched();
2159         }
2160         return ret;
2161 }
2162
2163 static int pin_down_bytes(struct btrfs_trans_handle *trans,
2164                           struct btrfs_root *root,
2165                           struct btrfs_path *path,
2166                           u64 bytenr, u64 num_bytes, int is_data,
2167                           struct extent_buffer **must_clean)
2168 {
2169         int err = 0;
2170         struct extent_buffer *buf;
2171
2172         if (is_data)
2173                 goto pinit;
2174
2175         buf = btrfs_find_tree_block(root, bytenr, num_bytes);
2176         if (!buf)
2177                 goto pinit;
2178
2179         /* we can reuse a block if it hasn't been written
2180          * and it is from this transaction.  We can't
2181          * reuse anything from the tree log root because
2182          * it has tiny sub-transactions.
2183          */
2184         if (btrfs_buffer_uptodate(buf, 0) &&
2185             btrfs_try_tree_lock(buf)) {
2186                 u64 header_owner = btrfs_header_owner(buf);
2187                 u64 header_transid = btrfs_header_generation(buf);
2188                 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
2189                     header_owner != BTRFS_TREE_RELOC_OBJECTID &&
2190                     header_owner != BTRFS_DATA_RELOC_TREE_OBJECTID &&
2191                     header_transid == trans->transid &&
2192                     !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
2193                         *must_clean = buf;
2194                         return 1;
2195                 }
2196                 btrfs_tree_unlock(buf);
2197         }
2198         free_extent_buffer(buf);
2199 pinit:
2200         btrfs_set_path_blocking(path);
2201         /* unlocks the pinned mutex */
2202         btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2203
2204         BUG_ON(err < 0);
2205         return 0;
2206 }
2207
2208 /*
2209  * remove an extent from the root, returns 0 on success
2210  */
2211 static int __free_extent(struct btrfs_trans_handle *trans,
2212                          struct btrfs_root *root,
2213                          u64 bytenr, u64 num_bytes, u64 parent,
2214                          u64 root_objectid, u64 ref_generation,
2215                          u64 owner_objectid, int pin, int mark_free,
2216                          int refs_to_drop)
2217 {
2218         struct btrfs_path *path;
2219         struct btrfs_key key;
2220         struct btrfs_fs_info *info = root->fs_info;
2221         struct btrfs_root *extent_root = info->extent_root;
2222         struct extent_buffer *leaf;
2223         int ret;
2224         int extent_slot = 0;
2225         int found_extent = 0;
2226         int num_to_del = 1;
2227         struct btrfs_extent_item *ei;
2228         u32 refs;
2229
2230         key.objectid = bytenr;
2231         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
2232         key.offset = num_bytes;
2233         path = btrfs_alloc_path();
2234         if (!path)
2235                 return -ENOMEM;
2236
2237         path->reada = 1;
2238         path->leave_spinning = 1;
2239         ret = lookup_extent_backref(trans, extent_root, path,
2240                                     bytenr, parent, root_objectid,
2241                                     ref_generation, owner_objectid, 1);
2242         if (ret == 0) {
2243                 struct btrfs_key found_key;
2244                 extent_slot = path->slots[0];
2245                 while (extent_slot > 0) {
2246                         extent_slot--;
2247                         btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2248                                               extent_slot);
2249                         if (found_key.objectid != bytenr)
2250                                 break;
2251                         if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
2252                             found_key.offset == num_bytes) {
2253                                 found_extent = 1;
2254                                 break;
2255                         }
2256                         if (path->slots[0] - extent_slot > 5)
2257                                 break;
2258                 }
2259                 if (!found_extent) {
2260                         ret = remove_extent_backref(trans, extent_root, path,
2261                                                     refs_to_drop);
2262                         BUG_ON(ret);
2263                         btrfs_release_path(extent_root, path);
2264                         path->leave_spinning = 1;
2265                         ret = btrfs_search_slot(trans, extent_root,
2266                                                 &key, path, -1, 1);
2267                         if (ret) {
2268                                 printk(KERN_ERR "umm, got %d back from search"
2269                                        ", was looking for %llu\n", ret,
2270                                        (unsigned long long)bytenr);
2271                                 btrfs_print_leaf(extent_root, path->nodes[0]);
2272                         }
2273                         BUG_ON(ret);
2274                         extent_slot = path->slots[0];
2275                 }
2276         } else {
2277                 btrfs_print_leaf(extent_root, path->nodes[0]);
2278                 WARN_ON(1);
2279                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
2280                        "parent %llu root %llu gen %llu owner %llu\n",
2281                        (unsigned long long)bytenr,
2282                        (unsigned long long)parent,
2283                        (unsigned long long)root_objectid,
2284                        (unsigned long long)ref_generation,
2285                        (unsigned long long)owner_objectid);
2286         }
2287
2288         leaf = path->nodes[0];
2289         ei = btrfs_item_ptr(leaf, extent_slot,
2290                             struct btrfs_extent_item);
2291         refs = btrfs_extent_refs(leaf, ei);
2292
2293         /*
2294          * we're not allowed to delete the extent item if there
2295          * are other delayed ref updates pending
2296          */
2297
2298         BUG_ON(refs < refs_to_drop);
2299         refs -= refs_to_drop;
2300         btrfs_set_extent_refs(leaf, ei, refs);
2301         btrfs_mark_buffer_dirty(leaf);
2302
2303         if (refs == 0 && found_extent &&
2304             path->slots[0] == extent_slot + 1) {
2305                 struct btrfs_extent_ref *ref;
2306                 ref = btrfs_item_ptr(leaf, path->slots[0],
2307                                      struct btrfs_extent_ref);
2308                 BUG_ON(btrfs_ref_num_refs(leaf, ref) != refs_to_drop);
2309                 /* if the back ref and the extent are next to each other
2310                  * they get deleted below in one shot
2311                  */
2312                 path->slots[0] = extent_slot;
2313                 num_to_del = 2;
2314         } else if (found_extent) {
2315                 /* otherwise delete the extent back ref */
2316                 ret = remove_extent_backref(trans, extent_root, path,
2317                                             refs_to_drop);
2318                 BUG_ON(ret);
2319                 /* if refs are 0, we need to setup the path for deletion */
2320                 if (refs == 0) {
2321                         btrfs_release_path(extent_root, path);
2322                         path->leave_spinning = 1;
2323                         ret = btrfs_search_slot(trans, extent_root, &key, path,
2324                                                 -1, 1);
2325                         BUG_ON(ret);
2326                 }
2327         }
2328
2329         if (refs == 0) {
2330                 u64 super_used;
2331                 u64 root_used;
2332                 struct extent_buffer *must_clean = NULL;
2333
2334                 if (pin) {
2335                         ret = pin_down_bytes(trans, root, path,
2336                                 bytenr, num_bytes,
2337                                 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID,
2338                                 &must_clean);
2339                         if (ret > 0)
2340                                 mark_free = 1;
2341                         BUG_ON(ret < 0);
2342                 }
2343
2344                 /* block accounting for super block */
2345                 spin_lock(&info->delalloc_lock);
2346                 super_used = btrfs_super_bytes_used(&info->super_copy);
2347                 btrfs_set_super_bytes_used(&info->super_copy,
2348                                            super_used - num_bytes);
2349
2350                 /* block accounting for root item */
2351                 root_used = btrfs_root_used(&root->root_item);
2352                 btrfs_set_root_used(&root->root_item,
2353                                            root_used - num_bytes);
2354                 spin_unlock(&info->delalloc_lock);
2355
2356                 /*
2357                  * it is going to be very rare for someone to be waiting
2358                  * on the block we're freeing.  del_items might need to
2359                  * schedule, so rather than get fancy, just force it
2360                  * to blocking here
2361                  */
2362                 if (must_clean)
2363                         btrfs_set_lock_blocking(must_clean);
2364
2365                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
2366                                       num_to_del);
2367                 BUG_ON(ret);
2368                 btrfs_release_path(extent_root, path);
2369
2370                 if (must_clean) {
2371                         clean_tree_block(NULL, root, must_clean);
2372                         btrfs_tree_unlock(must_clean);
2373                         free_extent_buffer(must_clean);
2374                 }
2375
2376                 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
2377                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
2378                         BUG_ON(ret);
2379                 } else {
2380                         invalidate_mapping_pages(info->btree_inode->i_mapping,
2381                              bytenr >> PAGE_CACHE_SHIFT,
2382                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
2383                 }
2384
2385                 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
2386                                          mark_free);
2387                 BUG_ON(ret);
2388         }
2389         btrfs_free_path(path);
2390         return ret;
2391 }
2392
2393 /*
2394  * remove an extent from the root, returns 0 on success
2395  */
2396 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2397                                         struct btrfs_root *root,
2398                                         u64 bytenr, u64 num_bytes, u64 parent,
2399                                         u64 root_objectid, u64 ref_generation,
2400                                         u64 owner_objectid, int pin,
2401                                         int refs_to_drop)
2402 {
2403         WARN_ON(num_bytes < root->sectorsize);
2404
2405         /*
2406          * if metadata always pin
2407          * if data pin when any transaction has committed this
2408          */
2409         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID ||
2410             ref_generation != trans->transid)
2411                 pin = 1;
2412
2413         if (ref_generation != trans->transid)
2414                 pin = 1;
2415
2416         return __free_extent(trans, root, bytenr, num_bytes, parent,
2417                             root_objectid, ref_generation,
2418                             owner_objectid, pin, pin == 0, refs_to_drop);
2419 }
2420
2421 /*
2422  * when we free an extent, it is possible (and likely) that we free the last
2423  * delayed ref for that extent as well.  This searches the delayed ref tree for
2424  * a given extent, and if there are no other delayed refs to be processed, it
2425  * removes it from the tree.
2426  */
2427 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
2428                                       struct btrfs_root *root, u64 bytenr)
2429 {
2430         struct btrfs_delayed_ref_head *head;
2431         struct btrfs_delayed_ref_root *delayed_refs;
2432         struct btrfs_delayed_ref_node *ref;
2433         struct rb_node *node;
2434         int ret;
2435
2436         delayed_refs = &trans->transaction->delayed_refs;
2437         spin_lock(&delayed_refs->lock);
2438         head = btrfs_find_delayed_ref_head(trans, bytenr);
2439         if (!head)
2440                 goto out;
2441
2442         node = rb_prev(&head->node.rb_node);
2443         if (!node)
2444                 goto out;
2445
2446         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2447
2448         /* there are still entries for this ref, we can't drop it */
2449         if (ref->bytenr == bytenr)
2450                 goto out;
2451
2452         /*
2453          * waiting for the lock here would deadlock.  If someone else has it
2454          * locked they are already in the process of dropping it anyway
2455          */
2456         if (!mutex_trylock(&head->mutex))
2457                 goto out;
2458
2459         /*
2460          * at this point we have a head with no other entries.  Go
2461          * ahead and process it.
2462          */
2463         head->node.in_tree = 0;
2464         rb_erase(&head->node.rb_node, &delayed_refs->root);
2465
2466         delayed_refs->num_entries--;
2467
2468         /*
2469          * we don't take a ref on the node because we're removing it from the
2470          * tree, so we just steal the ref the tree was holding.
2471          */
2472         delayed_refs->num_heads--;
2473         if (list_empty(&head->cluster))
2474                 delayed_refs->num_heads_ready--;
2475
2476         list_del_init(&head->cluster);
2477         spin_unlock(&delayed_refs->lock);
2478
2479         ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
2480                                   &head->node, head->must_insert_reserved);
2481         BUG_ON(ret);
2482         btrfs_put_delayed_ref(&head->node);
2483         return 0;
2484 out:
2485         spin_unlock(&delayed_refs->lock);
2486         return 0;
2487 }
2488
2489 int btrfs_free_extent(struct btrfs_trans_handle *trans,
2490                       struct btrfs_root *root,
2491                       u64 bytenr, u64 num_bytes, u64 parent,
2492                       u64 root_objectid, u64 ref_generation,
2493                       u64 owner_objectid, int pin)
2494 {
2495         int ret;
2496
2497         /*
2498          * tree log blocks never actually go into the extent allocation
2499          * tree, just update pinning info and exit early.
2500          *
2501          * data extents referenced by the tree log do need to have
2502          * their reference counts bumped.
2503          */
2504         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID &&
2505             owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2506                 /* unlocks the pinned mutex */
2507                 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2508                 update_reserved_extents(root, bytenr, num_bytes, 0);
2509                 ret = 0;
2510         } else {
2511                 ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent,
2512                                        root_objectid, ref_generation,
2513                                        owner_objectid,
2514                                        BTRFS_DROP_DELAYED_REF, 1);
2515                 BUG_ON(ret);
2516                 ret = check_ref_cleanup(trans, root, bytenr);
2517                 BUG_ON(ret);
2518         }
2519         return ret;
2520 }
2521
2522 static u64 stripe_align(struct btrfs_root *root, u64 val)
2523 {
2524         u64 mask = ((u64)root->stripesize - 1);
2525         u64 ret = (val + mask) & ~mask;
2526         return ret;
2527 }
2528
2529 /*
2530  * walks the btree of allocated extents and find a hole of a given size.
2531  * The key ins is changed to record the hole:
2532  * ins->objectid == block start
2533  * ins->flags = BTRFS_EXTENT_ITEM_KEY
2534  * ins->offset == number of blocks
2535  * Any available blocks before search_start are skipped.
2536  */
2537 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2538                                      struct btrfs_root *orig_root,
2539                                      u64 num_bytes, u64 empty_size,
2540                                      u64 search_start, u64 search_end,
2541                                      u64 hint_byte, struct btrfs_key *ins,
2542                                      u64 exclude_start, u64 exclude_nr,
2543                                      int data)
2544 {
2545         int ret = 0;
2546         struct btrfs_root *root = orig_root->fs_info->extent_root;
2547         struct btrfs_free_cluster *last_ptr = NULL;
2548         struct btrfs_block_group_cache *block_group = NULL;
2549         int empty_cluster = 2 * 1024 * 1024;
2550         int allowed_chunk_alloc = 0;
2551         struct btrfs_space_info *space_info;
2552         int last_ptr_loop = 0;
2553         int loop = 0;
2554
2555         WARN_ON(num_bytes < root->sectorsize);
2556         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2557         ins->objectid = 0;
2558         ins->offset = 0;
2559
2560         space_info = __find_space_info(root->fs_info, data);
2561
2562         if (orig_root->ref_cows || empty_size)
2563                 allowed_chunk_alloc = 1;
2564
2565         if (data & BTRFS_BLOCK_GROUP_METADATA) {
2566                 last_ptr = &root->fs_info->meta_alloc_cluster;
2567                 if (!btrfs_test_opt(root, SSD))
2568                         empty_cluster = 64 * 1024;
2569         }
2570
2571         if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
2572                 last_ptr = &root->fs_info->data_alloc_cluster;
2573         }
2574
2575         if (last_ptr) {
2576                 spin_lock(&last_ptr->lock);
2577                 if (last_ptr->block_group)
2578                         hint_byte = last_ptr->window_start;
2579                 spin_unlock(&last_ptr->lock);
2580         }
2581
2582         search_start = max(search_start, first_logical_byte(root, 0));
2583         search_start = max(search_start, hint_byte);
2584
2585         if (!last_ptr) {
2586                 empty_cluster = 0;
2587                 loop = 1;
2588         }
2589
2590         if (search_start == hint_byte) {
2591                 block_group = btrfs_lookup_block_group(root->fs_info,
2592                                                        search_start);
2593                 if (block_group && block_group_bits(block_group, data)) {
2594                         down_read(&space_info->groups_sem);
2595                         goto have_block_group;
2596                 } else if (block_group) {
2597                         btrfs_put_block_group(block_group);
2598                 }
2599         }
2600
2601 search:
2602         down_read(&space_info->groups_sem);
2603         list_for_each_entry(block_group, &space_info->block_groups, list) {
2604                 u64 offset;
2605
2606                 atomic_inc(&block_group->count);
2607                 search_start = block_group->key.objectid;
2608
2609 have_block_group:
2610                 if (unlikely(!block_group->cached)) {
2611                         mutex_lock(&block_group->cache_mutex);
2612                         ret = cache_block_group(root, block_group);
2613                         mutex_unlock(&block_group->cache_mutex);
2614                         if (ret) {
2615                                 btrfs_put_block_group(block_group);
2616                                 break;
2617                         }
2618                 }
2619
2620                 if (unlikely(block_group->ro))
2621                         goto loop;
2622
2623                 if (last_ptr) {
2624                         /*
2625                          * the refill lock keeps out other
2626                          * people trying to start a new cluster
2627                          */
2628                         spin_lock(&last_ptr->refill_lock);
2629                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
2630                                                  num_bytes, search_start);
2631                         if (offset) {
2632                                 /* we have a block, we're done */
2633                                 spin_unlock(&last_ptr->refill_lock);
2634                                 goto checks;
2635                         }
2636
2637                         spin_lock(&last_ptr->lock);
2638                         /*
2639                          * whoops, this cluster doesn't actually point to
2640                          * this block group.  Get a ref on the block
2641                          * group is does point to and try again
2642                          */
2643                         if (!last_ptr_loop && last_ptr->block_group &&
2644                             last_ptr->block_group != block_group) {
2645
2646                                 btrfs_put_block_group(block_group);
2647                                 block_group = last_ptr->block_group;
2648                                 atomic_inc(&block_group->count);
2649                                 spin_unlock(&last_ptr->lock);
2650                                 spin_unlock(&last_ptr->refill_lock);
2651
2652                                 last_ptr_loop = 1;
2653                                 search_start = block_group->key.objectid;
2654                                 goto have_block_group;
2655                         }
2656                         spin_unlock(&last_ptr->lock);
2657
2658                         /*
2659                          * this cluster didn't work out, free it and
2660                          * start over
2661                          */
2662                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
2663
2664                         last_ptr_loop = 0;
2665
2666                         /* allocate a cluster in this block group */
2667                         ret = btrfs_find_space_cluster(trans,
2668                                                block_group, last_ptr,
2669                                                offset, num_bytes,
2670                                                empty_cluster + empty_size);
2671                         if (ret == 0) {
2672                                 /*
2673                                  * now pull our allocation out of this
2674                                  * cluster
2675                                  */
2676                                 offset = btrfs_alloc_from_cluster(block_group,
2677                                                   last_ptr, num_bytes,
2678                                                   search_start);
2679                                 if (offset) {
2680                                         /* we found one, proceed */
2681                                         spin_unlock(&last_ptr->refill_lock);
2682                                         goto checks;
2683                                 }
2684                         }
2685                         /*
2686                          * at this point we either didn't find a cluster
2687                          * or we weren't able to allocate a block from our
2688                          * cluster.  Free the cluster we've been trying
2689                          * to use, and go to the next block group
2690                          */
2691                         if (loop < 2) {
2692                                 btrfs_return_cluster_to_free_space(NULL,
2693                                                                    last_ptr);
2694                                 spin_unlock(&last_ptr->refill_lock);
2695                                 goto loop;
2696                         }
2697                         spin_unlock(&last_ptr->refill_lock);
2698                 }
2699
2700                 offset = btrfs_find_space_for_alloc(block_group, search_start,
2701                                                     num_bytes, empty_size);
2702                 if (!offset)
2703                         goto loop;
2704 checks:
2705                 search_start = stripe_align(root, offset);
2706
2707                 /* move on to the next group */
2708                 if (search_start + num_bytes >= search_end) {
2709                         btrfs_add_free_space(block_group, offset, num_bytes);
2710                         goto loop;
2711                 }
2712
2713                 /* move on to the next group */
2714                 if (search_start + num_bytes >
2715                     block_group->key.objectid + block_group->key.offset) {
2716                         btrfs_add_free_space(block_group, offset, num_bytes);
2717                         goto loop;
2718                 }
2719
2720                 if (exclude_nr > 0 &&
2721                     (search_start + num_bytes > exclude_start &&
2722                      search_start < exclude_start + exclude_nr)) {
2723                         search_start = exclude_start + exclude_nr;
2724
2725                         btrfs_add_free_space(block_group, offset, num_bytes);
2726                         /*
2727                          * if search_start is still in this block group
2728                          * then we just re-search this block group
2729                          */
2730                         if (search_start >= block_group->key.objectid &&
2731                             search_start < (block_group->key.objectid +
2732                                             block_group->key.offset))
2733                                 goto have_block_group;
2734                         goto loop;
2735                 }
2736
2737                 ins->objectid = search_start;
2738                 ins->offset = num_bytes;
2739
2740                 if (offset < search_start)
2741                         btrfs_add_free_space(block_group, offset,
2742                                              search_start - offset);
2743                 BUG_ON(offset > search_start);
2744
2745                 /* we are all good, lets return */
2746                 break;
2747 loop:
2748                 btrfs_put_block_group(block_group);
2749         }
2750         up_read(&space_info->groups_sem);
2751
2752         /* loop == 0, try to find a clustered alloc in every block group
2753          * loop == 1, try again after forcing a chunk allocation
2754          * loop == 2, set empty_size and empty_cluster to 0 and try again
2755          */
2756         if (!ins->objectid && loop < 3 &&
2757             (empty_size || empty_cluster || allowed_chunk_alloc)) {
2758                 if (loop >= 2) {
2759                         empty_size = 0;
2760                         empty_cluster = 0;
2761                 }
2762
2763                 if (allowed_chunk_alloc) {
2764                         ret = do_chunk_alloc(trans, root, num_bytes +
2765                                              2 * 1024 * 1024, data, 1);
2766                         allowed_chunk_alloc = 0;
2767                 } else {
2768                         space_info->force_alloc = 1;
2769                 }
2770
2771                 if (loop < 3) {
2772                         loop++;
2773                         goto search;
2774                 }
2775                 ret = -ENOSPC;
2776         } else if (!ins->objectid) {
2777                 ret = -ENOSPC;
2778         }
2779
2780         /* we found what we needed */
2781         if (ins->objectid) {
2782                 if (!(data & BTRFS_BLOCK_GROUP_DATA))
2783                         trans->block_group = block_group->key.objectid;
2784
2785                 btrfs_put_block_group(block_group);
2786                 ret = 0;
2787         }
2788
2789         return ret;
2790 }
2791
2792 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
2793 {
2794         struct btrfs_block_group_cache *cache;
2795
2796         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
2797                (unsigned long long)(info->total_bytes - info->bytes_used -
2798                                     info->bytes_pinned - info->bytes_reserved),
2799                (info->full) ? "" : "not ");
2800         printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
2801                " may_use=%llu, used=%llu\n", info->total_bytes,
2802                info->bytes_pinned, info->bytes_delalloc, info->bytes_may_use,
2803                info->bytes_used);
2804
2805         down_read(&info->groups_sem);
2806         list_for_each_entry(cache, &info->block_groups, list) {
2807                 spin_lock(&cache->lock);
2808                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
2809                        "%llu pinned %llu reserved\n",
2810                        (unsigned long long)cache->key.objectid,
2811                        (unsigned long long)cache->key.offset,
2812                        (unsigned long long)btrfs_block_group_used(&cache->item),
2813                        (unsigned long long)cache->pinned,
2814                        (unsigned long long)cache->reserved);
2815                 btrfs_dump_free_space(cache, bytes);
2816                 spin_unlock(&cache->lock);
2817         }
2818         up_read(&info->groups_sem);
2819 }
2820
2821 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2822                                   struct btrfs_root *root,
2823                                   u64 num_bytes, u64 min_alloc_size,
2824                                   u64 empty_size, u64 hint_byte,
2825                                   u64 search_end, struct btrfs_key *ins,
2826                                   u64 data)
2827 {
2828         int ret;
2829         u64 search_start = 0;
2830         struct btrfs_fs_info *info = root->fs_info;
2831
2832         data = btrfs_get_alloc_profile(root, data);
2833 again:
2834         /*
2835          * the only place that sets empty_size is btrfs_realloc_node, which
2836          * is not called recursively on allocations
2837          */
2838         if (empty_size || root->ref_cows) {
2839                 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2840                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2841                                      2 * 1024 * 1024,
2842                                      BTRFS_BLOCK_GROUP_METADATA |
2843                                      (info->metadata_alloc_profile &
2844                                       info->avail_metadata_alloc_bits), 0);
2845                 }
2846                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2847                                      num_bytes + 2 * 1024 * 1024, data, 0);
2848         }
2849
2850         WARN_ON(num_bytes < root->sectorsize);
2851         ret = find_free_extent(trans, root, num_bytes, empty_size,
2852                                search_start, search_end, hint_byte, ins,
2853                                trans->alloc_exclude_start,
2854                                trans->alloc_exclude_nr, data);
2855
2856         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
2857                 num_bytes = num_bytes >> 1;
2858                 num_bytes = num_bytes & ~(root->sectorsize - 1);
2859                 num_bytes = max(num_bytes, min_alloc_size);
2860                 do_chunk_alloc(trans, root->fs_info->extent_root,
2861                                num_bytes, data, 1);
2862                 goto again;
2863         }
2864         if (ret) {
2865                 struct btrfs_space_info *sinfo;
2866
2867                 sinfo = __find_space_info(root->fs_info, data);
2868                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
2869                        "wanted %llu\n", (unsigned long long)data,
2870                        (unsigned long long)num_bytes);
2871                 dump_space_info(sinfo, num_bytes);
2872                 BUG();
2873         }
2874
2875         return ret;
2876 }
2877
2878 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
2879 {
2880         struct btrfs_block_group_cache *cache;
2881         int ret = 0;
2882
2883         cache = btrfs_lookup_block_group(root->fs_info, start);
2884         if (!cache) {
2885                 printk(KERN_ERR "Unable to find block group for %llu\n",
2886                        (unsigned long long)start);
2887                 return -ENOSPC;
2888         }
2889
2890         ret = btrfs_discard_extent(root, start, len);
2891
2892         btrfs_add_free_space(cache, start, len);
2893         btrfs_put_block_group(cache);
2894         update_reserved_extents(root, start, len, 0);
2895
2896         return ret;
2897 }
2898
2899 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2900                                   struct btrfs_root *root,
2901                                   u64 num_bytes, u64 min_alloc_size,
2902                                   u64 empty_size, u64 hint_byte,
2903                                   u64 search_end, struct btrfs_key *ins,
2904                                   u64 data)
2905 {
2906         int ret;
2907         ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2908                                      empty_size, hint_byte, search_end, ins,
2909                                      data);
2910         update_reserved_extents(root, ins->objectid, ins->offset, 1);
2911         return ret;
2912 }
2913
2914 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2915                                          struct btrfs_root *root, u64 parent,
2916                                          u64 root_objectid, u64 ref_generation,
2917                                          u64 owner, struct btrfs_key *ins,
2918                                          int ref_mod)
2919 {
2920         int ret;
2921         u64 super_used;
2922         u64 root_used;
2923         u64 num_bytes = ins->offset;
2924         u32 sizes[2];
2925         struct btrfs_fs_info *info = root->fs_info;
2926         struct btrfs_root *extent_root = info->extent_root;
2927         struct btrfs_extent_item *extent_item;
2928         struct btrfs_extent_ref *ref;
2929         struct btrfs_path *path;
2930         struct btrfs_key keys[2];
2931
2932         if (parent == 0)
2933                 parent = ins->objectid;
2934
2935         /* block accounting for super block */
2936         spin_lock(&info->delalloc_lock);
2937         super_used = btrfs_super_bytes_used(&info->super_copy);
2938         btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2939
2940         /* block accounting for root item */
2941         root_used = btrfs_root_used(&root->root_item);
2942         btrfs_set_root_used(&root->root_item, root_used + num_bytes);
2943         spin_unlock(&info->delalloc_lock);
2944
2945         memcpy(&keys[0], ins, sizeof(*ins));
2946         keys[1].objectid = ins->objectid;
2947         keys[1].type = BTRFS_EXTENT_REF_KEY;
2948         keys[1].offset = parent;
2949         sizes[0] = sizeof(*extent_item);
2950         sizes[1] = sizeof(*ref);
2951
2952         path = btrfs_alloc_path();
2953         BUG_ON(!path);
2954
2955         path->leave_spinning = 1;
2956         ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
2957                                        sizes, 2);
2958         BUG_ON(ret);
2959
2960         extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2961                                      struct btrfs_extent_item);
2962         btrfs_set_extent_refs(path->nodes[0], extent_item, ref_mod);
2963         ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2964                              struct btrfs_extent_ref);
2965
2966         btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
2967         btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
2968         btrfs_set_ref_objectid(path->nodes[0], ref, owner);
2969         btrfs_set_ref_num_refs(path->nodes[0], ref, ref_mod);
2970
2971         btrfs_mark_buffer_dirty(path->nodes[0]);
2972
2973         trans->alloc_exclude_start = 0;
2974         trans->alloc_exclude_nr = 0;
2975         btrfs_free_path(path);
2976
2977         if (ret)
2978                 goto out;
2979
2980         ret = update_block_group(trans, root, ins->objectid,
2981                                  ins->offset, 1, 0);
2982         if (ret) {
2983                 printk(KERN_ERR "btrfs update block group failed for %llu "
2984                        "%llu\n", (unsigned long long)ins->objectid,
2985                        (unsigned long long)ins->offset);
2986                 BUG();
2987         }
2988 out:
2989         return ret;
2990 }
2991
2992 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2993                                 struct btrfs_root *root, u64 parent,
2994                                 u64 root_objectid, u64 ref_generation,
2995                                 u64 owner, struct btrfs_key *ins)
2996 {
2997         int ret;
2998
2999         if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
3000                 return 0;
3001
3002         ret = btrfs_add_delayed_ref(trans, ins->objectid,
3003                                     ins->offset, parent, root_objectid,
3004                                     ref_generation, owner,
3005                                     BTRFS_ADD_DELAYED_EXTENT, 0);
3006         BUG_ON(ret);
3007         return ret;
3008 }
3009
3010 /*
3011  * this is used by the tree logging recovery code.  It records that
3012  * an extent has been allocated and makes sure to clear the free
3013  * space cache bits as well
3014  */
3015 int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
3016                                 struct btrfs_root *root, u64 parent,
3017                                 u64 root_objectid, u64 ref_generation,
3018                                 u64 owner, struct btrfs_key *ins)
3019 {
3020         int ret;
3021         struct btrfs_block_group_cache *block_group;
3022
3023         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
3024         mutex_lock(&block_group->cache_mutex);
3025         cache_block_group(root, block_group);
3026         mutex_unlock(&block_group->cache_mutex);
3027
3028         ret = btrfs_remove_free_space(block_group, ins->objectid,
3029                                       ins->offset);
3030         BUG_ON(ret);
3031         btrfs_put_block_group(block_group);
3032         ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3033                                             ref_generation, owner, ins, 1);
3034         return ret;
3035 }
3036
3037 /*
3038  * finds a free extent and does all the dirty work required for allocation
3039  * returns the key for the extent through ins, and a tree buffer for
3040  * the first block of the extent through buf.
3041  *
3042  * returns 0 if everything worked, non-zero otherwise.
3043  */
3044 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
3045                        struct btrfs_root *root,
3046                        u64 num_bytes, u64 parent, u64 min_alloc_size,
3047                        u64 root_objectid, u64 ref_generation,
3048                        u64 owner_objectid, u64 empty_size, u64 hint_byte,
3049                        u64 search_end, struct btrfs_key *ins, u64 data)
3050 {
3051         int ret;
3052         ret = __btrfs_reserve_extent(trans, root, num_bytes,
3053                                      min_alloc_size, empty_size, hint_byte,
3054                                      search_end, ins, data);
3055         BUG_ON(ret);
3056         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
3057                 ret = btrfs_add_delayed_ref(trans, ins->objectid,
3058                                             ins->offset, parent, root_objectid,
3059                                             ref_generation, owner_objectid,
3060                                             BTRFS_ADD_DELAYED_EXTENT, 0);
3061                 BUG_ON(ret);
3062         }
3063         update_reserved_extents(root, ins->objectid, ins->offset, 1);
3064         return ret;
3065 }
3066
3067 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3068                                             struct btrfs_root *root,
3069                                             u64 bytenr, u32 blocksize,
3070                                             int level)
3071 {
3072         struct extent_buffer *buf;
3073
3074         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
3075         if (!buf)
3076                 return ERR_PTR(-ENOMEM);
3077         btrfs_set_header_generation(buf, trans->transid);
3078         btrfs_set_buffer_lockdep_class(buf, level);
3079         btrfs_tree_lock(buf);
3080         clean_tree_block(trans, root, buf);
3081
3082         btrfs_set_lock_blocking(buf);
3083         btrfs_set_buffer_uptodate(buf);
3084
3085         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
3086                 set_extent_dirty(&root->dirty_log_pages, buf->start,
3087                          buf->start + buf->len - 1, GFP_NOFS);
3088         } else {
3089                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
3090                          buf->start + buf->len - 1, GFP_NOFS);
3091         }
3092         trans->blocks_used++;
3093         /* this returns a buffer locked for blocking */
3094         return buf;
3095 }
3096
3097 /*
3098  * helper function to allocate a block for a given tree
3099  * returns the tree buffer or NULL.
3100  */
3101 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
3102                                              struct btrfs_root *root,
3103                                              u32 blocksize, u64 parent,
3104                                              u64 root_objectid,
3105                                              u64 ref_generation,
3106                                              int level,
3107                                              u64 hint,
3108                                              u64 empty_size)
3109 {
3110         struct btrfs_key ins;
3111         int ret;
3112         struct extent_buffer *buf;
3113
3114         ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
3115                                  root_objectid, ref_generation, level,
3116                                  empty_size, hint, (u64)-1, &ins, 0);
3117         if (ret) {
3118                 BUG_ON(ret > 0);
3119                 return ERR_PTR(ret);
3120         }
3121
3122         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
3123                                     blocksize, level);
3124         return buf;
3125 }
3126
3127 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3128                         struct btrfs_root *root, struct extent_buffer *leaf)
3129 {
3130         u64 leaf_owner;
3131         u64 leaf_generation;
3132         struct refsort *sorted;
3133         struct btrfs_key key;
3134         struct btrfs_file_extent_item *fi;
3135         int i;
3136         int nritems;
3137         int ret;
3138         int refi = 0;
3139         int slot;
3140
3141         BUG_ON(!btrfs_is_leaf(leaf));
3142         nritems = btrfs_header_nritems(leaf);
3143         leaf_owner = btrfs_header_owner(leaf);
3144         leaf_generation = btrfs_header_generation(leaf);
3145
3146         sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3147         /* we do this loop twice.  The first time we build a list
3148          * of the extents we have a reference on, then we sort the list
3149          * by bytenr.  The second time around we actually do the
3150          * extent freeing.
3151          */
3152         for (i = 0; i < nritems; i++) {
3153                 u64 disk_bytenr;
3154                 cond_resched();
3155
3156                 btrfs_item_key_to_cpu(leaf, &key, i);
3157
3158                 /* only extents have references, skip everything else */
3159                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3160                         continue;
3161
3162                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3163
3164                 /* inline extents live in the btree, they don't have refs */
3165                 if (btrfs_file_extent_type(leaf, fi) ==
3166                     BTRFS_FILE_EXTENT_INLINE)
3167                         continue;
3168
3169                 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
3170
3171                 /* holes don't have refs */
3172                 if (disk_bytenr == 0)
3173                         continue;
3174
3175                 sorted[refi].bytenr = disk_bytenr;
3176                 sorted[refi].slot = i;
3177                 refi++;
3178         }
3179
3180         if (refi == 0)
3181                 goto out;
3182
3183         sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3184
3185         for (i = 0; i < refi; i++) {
3186                 u64 disk_bytenr;
3187
3188                 disk_bytenr = sorted[i].bytenr;
3189                 slot = sorted[i].slot;
3190
3191                 cond_resched();
3192
3193                 btrfs_item_key_to_cpu(leaf, &key, slot);
3194                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3195                         continue;
3196
3197                 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
3198
3199                 ret = btrfs_free_extent(trans, root, disk_bytenr,
3200                                 btrfs_file_extent_disk_num_bytes(leaf, fi),
3201                                 leaf->start, leaf_owner, leaf_generation,
3202                                 key.objectid, 0);
3203                 BUG_ON(ret);
3204
3205                 atomic_inc(&root->fs_info->throttle_gen);
3206                 wake_up(&root->fs_info->transaction_throttle);
3207                 cond_resched();
3208         }
3209 out:
3210         kfree(sorted);
3211         return 0;
3212 }
3213
3214 static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3215                                         struct btrfs_root *root,
3216                                         struct btrfs_leaf_ref *ref)
3217 {
3218         int i;
3219         int ret;
3220         struct btrfs_extent_info *info;
3221         struct refsort *sorted;
3222
3223         if (ref->nritems == 0)
3224                 return 0;
3225
3226         sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
3227         for (i = 0; i < ref->nritems; i++) {
3228                 sorted[i].bytenr = ref->extents[i].bytenr;
3229                 sorted[i].slot = i;
3230         }
3231         sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
3232
3233         /*
3234          * the items in the ref were sorted when the ref was inserted
3235          * into the ref cache, so this is already in order
3236          */
3237         for (i = 0; i < ref->nritems; i++) {
3238                 info = ref->extents + sorted[i].slot;
3239                 ret = btrfs_free_extent(trans, root, info->bytenr,
3240                                           info->num_bytes, ref->bytenr,
3241                                           ref->owner, ref->generation,
3242                                           info->objectid, 0);
3243
3244                 atomic_inc(&root->fs_info->throttle_gen);
3245                 wake_up(&root->fs_info->transaction_throttle);
3246                 cond_resched();
3247
3248                 BUG_ON(ret);
3249                 info++;
3250         }
3251
3252         kfree(sorted);
3253         return 0;
3254 }
3255
3256 static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans,
3257                                      struct btrfs_root *root, u64 start,
3258                                      u64 len, u32 *refs)
3259 {
3260         int ret;
3261
3262         ret = btrfs_lookup_extent_ref(trans, root, start, len, refs);
3263         BUG_ON(ret);
3264
3265 #if 0 /* some debugging code in case we see problems here */
3266         /* if the refs count is one, it won't get increased again.  But
3267          * if the ref count is > 1, someone may be decreasing it at
3268          * the same time we are.
3269          */
3270         if (*refs != 1) {
3271                 struct extent_buffer *eb = NULL;
3272                 eb = btrfs_find_create_tree_block(root, start, len);
3273                 if (eb)
3274                         btrfs_tree_lock(eb);
3275
3276                 mutex_lock(&root->fs_info->alloc_mutex);
3277                 ret = lookup_extent_ref(NULL, root, start, len, refs);
3278                 BUG_ON(ret);
3279                 mutex_unlock(&root->fs_info->alloc_mutex);
3280
3281                 if (eb) {
3282                         btrfs_tree_unlock(eb);
3283                         free_extent_buffer(eb);
3284                 }
3285                 if (*refs == 1) {
3286                         printk(KERN_ERR "btrfs block %llu went down to one "
3287                                "during drop_snap\n", (unsigned long long)start);
3288                 }
3289
3290         }
3291 #endif
3292
3293         cond_resched();
3294         return ret;
3295 }
3296
3297 /*
3298  * this is used while deleting old snapshots, and it drops the refs
3299  * on a whole subtree starting from a level 1 node.
3300  *
3301  * The idea is to sort all the leaf pointers, and then drop the
3302  * ref on all the leaves in order.  Most of the time the leaves
3303  * will have ref cache entries, so no leaf IOs will be required to
3304  * find the extents they have references on.
3305  *
3306  * For each leaf, any references it has are also dropped in order
3307  *
3308  * This ends up dropping the references in something close to optimal
3309  * order for reading and modifying the extent allocation tree.
3310  */
3311 static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
3312                                         struct btrfs_root *root,
3313                                         struct btrfs_path *path)
3314 {
3315         u64 bytenr;
3316         u64 root_owner;
3317         u64 root_gen;
3318         struct extent_buffer *eb = path->nodes[1];
3319         struct extent_buffer *leaf;
3320         struct btrfs_leaf_ref *ref;
3321         struct refsort *sorted = NULL;
3322         int nritems = btrfs_header_nritems(eb);
3323         int ret;
3324         int i;
3325         int refi = 0;
3326         int slot = path->slots[1];
3327         u32 blocksize = btrfs_level_size(root, 0);
3328         u32 refs;
3329
3330         if (nritems == 0)
3331                 goto out;
3332
3333         root_owner = btrfs_header_owner(eb);
3334         root_gen = btrfs_header_generation(eb);
3335         sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3336
3337         /*
3338          * step one, sort all the leaf pointers so we don't scribble
3339          * randomly into the extent allocation tree
3340          */
3341         for (i = slot; i < nritems; i++) {
3342                 sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
3343                 sorted[refi].slot = i;
3344                 refi++;
3345         }
3346
3347         /*
3348          * nritems won't be zero, but if we're picking up drop_snapshot
3349          * after a crash, slot might be > 0, so double check things
3350          * just in case.
3351          */
3352         if (refi == 0)
3353                 goto out;
3354
3355         sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3356
3357         /*
3358          * the first loop frees everything the leaves point to
3359          */
3360         for (i = 0; i < refi; i++) {
3361                 u64 ptr_gen;
3362
3363                 bytenr = sorted[i].bytenr;
3364
3365                 /*
3366                  * check the reference count on this leaf.  If it is > 1
3367                  * we just decrement it below and don't update any
3368                  * of the refs the leaf points to.
3369                  */
3370                 ret = drop_snap_lookup_refcount(trans, root, bytenr,
3371                                                 blocksize, &refs);
3372                 BUG_ON(ret);
3373                 if (refs != 1)
3374                         continue;
3375
3376                 ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
3377
3378                 /*
3379                  * the leaf only had one reference, which means the
3380                  * only thing pointing to this leaf is the snapshot
3381                  * we're deleting.  It isn't possible for the reference
3382                  * count to increase again later
3383                  *
3384                  * The reference cache is checked for the leaf,
3385                  * and if found we'll be able to drop any refs held by
3386                  * the leaf without needing to read it in.
3387                  */
3388                 ref = btrfs_lookup_leaf_ref(root, bytenr);
3389                 if (ref && ref->generation != ptr_gen) {
3390                         btrfs_free_leaf_ref(root, ref);
3391                         ref = NULL;
3392                 }
3393                 if (ref) {
3394                         ret = cache_drop_leaf_ref(trans, root, ref);
3395                         BUG_ON(ret);
3396                         btrfs_remove_leaf_ref(root, ref);
3397                         btrfs_free_leaf_ref(root, ref);
3398                 } else {
3399                         /*
3400                          * the leaf wasn't in the reference cache, so
3401                          * we have to read it.
3402                          */
3403                         leaf = read_tree_block(root, bytenr, blocksize,
3404                                                ptr_gen);
3405                         ret = btrfs_drop_leaf_ref(trans, root, leaf);
3406                         BUG_ON(ret);
3407                         free_extent_buffer(leaf);
3408                 }
3409                 atomic_inc(&root->fs_info->throttle_gen);
3410                 wake_up(&root->fs_info->transaction_throttle);
3411                 cond_resched();
3412         }
3413
3414         /*
3415          * run through the loop again to free the refs on the leaves.
3416          * This is faster than doing it in the loop above because
3417          * the leaves are likely to be clustered together.  We end up
3418          * working in nice chunks on the extent allocation tree.
3419          */
3420         for (i = 0; i < refi; i++) {
3421                 bytenr = sorted[i].bytenr;
3422                 ret = btrfs_free_extent(trans, root, bytenr,
3423                                         blocksize, eb->start,
3424                                         root_owner, root_gen, 0, 1);
3425                 BUG_ON(ret);
3426
3427                 atomic_inc(&root->fs_info->throttle_gen);
3428                 wake_up(&root->fs_info->transaction_throttle);
3429                 cond_resched();
3430         }
3431 out:
3432         kfree(sorted);
3433
3434         /*
3435          * update the path to show we've processed the entire level 1
3436          * node.  This will get saved into the root's drop_snapshot_progress
3437          * field so these drops are not repeated again if this transaction
3438          * commits.
3439          */
3440         path->slots[1] = nritems;
3441         return 0;
3442 }
3443
3444 /*
3445  * helper function for drop_snapshot, this walks down the tree dropping ref
3446  * counts as it goes.
3447  */
3448 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3449                                    struct btrfs_root *root,
3450                                    struct btrfs_path *path, int *level)
3451 {
3452         u64 root_owner;
3453         u64 root_gen;
3454         u64 bytenr;
3455         u64 ptr_gen;
3456         struct extent_buffer *next;
3457         struct extent_buffer *cur;
3458         struct extent_buffer *parent;
3459         u32 blocksize;
3460         int ret;
3461         u32 refs;
3462
3463         WARN_ON(*level < 0);
3464         WARN_ON(*level >= BTRFS_MAX_LEVEL);
3465         ret = drop_snap_lookup_refcount(trans, root, path->nodes[*level]->start,
3466                                 path->nodes[*level]->len, &refs);
3467         BUG_ON(ret);
3468         if (refs > 1)
3469                 goto out;
3470
3471         /*
3472          * walk down to the last node level and free all the leaves
3473          */
3474         while (*level >= 0) {
3475                 WARN_ON(*level < 0);
3476                 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3477                 cur = path->nodes[*level];
3478
3479                 if (btrfs_header_level(cur) != *level)
3480                         WARN_ON(1);
3481
3482                 if (path->slots[*level] >=
3483                     btrfs_header_nritems(cur))
3484                         break;
3485
3486                 /* the new code goes down to level 1 and does all the
3487                  * leaves pointed to that node in bulk.  So, this check
3488                  * for level 0 will always be false.
3489                  *
3490                  * But, the disk format allows the drop_snapshot_progress
3491                  * field in the root to leave things in a state where
3492                  * a leaf will need cleaning up here.  If someone crashes
3493                  * with the old code and then boots with the new code,
3494                  * we might find a leaf here.
3495                  */
3496                 if (*level == 0) {
3497                         ret = btrfs_drop_leaf_ref(trans, root, cur);
3498                         BUG_ON(ret);
3499                         break;
3500                 }
3501
3502                 /*
3503                  * once we get to level one, process the whole node
3504                  * at once, including everything below it.
3505                  */
3506                 if (*level == 1) {
3507                         ret = drop_level_one_refs(trans, root, path);
3508                         BUG_ON(ret);
3509                         break;
3510                 }
3511
3512                 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3513                 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3514                 blocksize = btrfs_level_size(root, *level - 1);
3515
3516                 ret = drop_snap_lookup_refcount(trans, root, bytenr,
3517                                                 blocksize, &refs);
3518                 BUG_ON(ret);
3519
3520                 /*
3521                  * if there is more than one reference, we don't need
3522                  * to read that node to drop any references it has.  We
3523                  * just drop the ref we hold on that node and move on to the
3524                  * next slot in this level.
3525                  */
3526                 if (refs != 1) {
3527                         parent = path->nodes[*level];
3528                         root_owner = btrfs_header_owner(parent);
3529                         root_gen = btrfs_header_generation(parent);
3530                         path->slots[*level]++;
3531
3532                         ret = btrfs_free_extent(trans, root, bytenr,
3533                                                 blocksize, parent->start,
3534                                                 root_owner, root_gen,
3535                                                 *level - 1, 1);
3536                         BUG_ON(ret);
3537
3538                         atomic_inc(&root->fs_info->throttle_gen);
3539                         wake_up(&root->fs_info->transaction_throttle);
3540                         cond_resched();
3541
3542                         continue;
3543                 }
3544
3545                 /*
3546                  * we need to keep freeing things in the next level down.
3547                  * read the block and loop around to process it
3548                  */
3549                 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3550                 WARN_ON(*level <= 0);
3551                 if (path->nodes[*level-1])
3552                         free_extent_buffer(path->nodes[*level-1]);
3553                 path->nodes[*level-1] = next;
3554                 *level = btrfs_header_level(next);
3555                 path->slots[*level] = 0;
3556                 cond_resched();
3557         }
3558 out:
3559         WARN_ON(*level < 0);
3560         WARN_ON(*level >= BTRFS_MAX_LEVEL);
3561
3562         if (path->nodes[*level] == root->node) {
3563                 parent = path->nodes[*level];
3564                 bytenr = path->nodes[*level]->start;
3565         } else {
3566                 parent = path->nodes[*level + 1];
3567                 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
3568         }
3569
3570         blocksize = btrfs_level_size(root, *level);
3571         root_owner = btrfs_header_owner(parent);
3572         root_gen = btrfs_header_generation(parent);
3573
3574         /*
3575          * cleanup and free the reference on the last node
3576          * we processed
3577          */
3578         ret = btrfs_free_extent(trans, root, bytenr, blocksize,
3579                                   parent->start, root_owner, root_gen,
3580                                   *level, 1);
3581         free_extent_buffer(path->nodes[*level]);
3582         path->nodes[*level] = NULL;
3583
3584         *level += 1;
3585         BUG_ON(ret);
3586
3587         cond_resched();
3588         return 0;
3589 }
3590
3591 /*
3592  * helper function for drop_subtree, this function is similar to
3593  * walk_down_tree. The main difference is that it checks reference
3594  * counts while tree blocks are locked.
3595  */
3596 static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3597                                       struct btrfs_root *root,
3598                                       struct btrfs_path *path, int *level)
3599 {
3600         struct extent_buffer *next;
3601         struct extent_buffer *cur;
3602         struct extent_buffer *parent;
3603         u64 bytenr;
3604         u64 ptr_gen;
3605         u32 blocksize;
3606         u32 refs;
3607         int ret;
3608
3609         cur = path->nodes[*level];
3610         ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
3611                                       &refs);
3612         BUG_ON(ret);
3613         if (refs > 1)
3614                 goto out;
3615
3616         while (*level >= 0) {
3617                 cur = path->nodes[*level];
3618                 if (*level == 0) {
3619                         ret = btrfs_drop_leaf_ref(trans, root, cur);
3620                         BUG_ON(ret);
3621                         clean_tree_block(trans, root, cur);
3622                         break;
3623                 }
3624                 if (path->slots[*level] >= btrfs_header_nritems(cur)) {
3625                         clean_tree_block(trans, root, cur);
3626                         break;
3627                 }
3628
3629                 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3630                 blocksize = btrfs_level_size(root, *level - 1);
3631                 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3632
3633                 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3634                 btrfs_tree_lock(next);
3635                 btrfs_set_lock_blocking(next);
3636
3637                 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3638                                               &refs);
3639                 BUG_ON(ret);
3640                 if (refs > 1) {
3641                         parent = path->nodes[*level];
3642                         ret = btrfs_free_extent(trans, root, bytenr,
3643                                         blocksize, parent->start,
3644                                         btrfs_header_owner(parent),
3645                                         btrfs_header_generation(parent),
3646                                         *level - 1, 1);
3647                         BUG_ON(ret);
3648                         path->slots[*level]++;
3649                         btrfs_tree_unlock(next);
3650                         free_extent_buffer(next);
3651                         continue;
3652                 }
3653
3654                 *level = btrfs_header_level(next);
3655                 path->nodes[*level] = next;
3656                 path->slots[*level] = 0;
3657                 path->locks[*level] = 1;
3658                 cond_resched();
3659         }
3660 out:
3661         parent = path->nodes[*level + 1];
3662         bytenr = path->nodes[*level]->start;
3663         blocksize = path->nodes[*level]->len;
3664
3665         ret = btrfs_free_extent(trans, root, bytenr, blocksize,
3666                         parent->start, btrfs_header_owner(parent),
3667                         btrfs_header_generation(parent), *level, 1);
3668         BUG_ON(ret);
3669
3670         if (path->locks[*level]) {
3671                 btrfs_tree_unlock(path->nodes[*level]);
3672                 path->locks[*level] = 0;
3673         }
3674         free_extent_buffer(path->nodes[*level]);
3675         path->nodes[*level] = NULL;
3676         *level += 1;
3677         cond_resched();
3678         return 0;
3679 }
3680
3681 /*
3682  * helper for dropping snapshots.  This walks back up the tree in the path
3683  * to find the first node higher up where we haven't yet gone through
3684  * all the slots
3685  */
3686 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3687                                  struct btrfs_root *root,
3688                                  struct btrfs_path *path,
3689                                  int *level, int max_level)
3690 {
3691         u64 root_owner;
3692         u64 root_gen;
3693         struct btrfs_root_item *root_item = &root->root_item;
3694         int i;
3695         int slot;
3696         int ret;
3697
3698         for (i = *level; i < max_level && path->nodes[i]; i++) {
3699                 slot = path->slots[i];
3700                 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
3701                         struct extent_buffer *node;
3702                         struct btrfs_disk_key disk_key;
3703
3704                         /*
3705                          * there is more work to do in this level.
3706                          * Update the drop_progress marker to reflect
3707                          * the work we've done so far, and then bump
3708                          * the slot number
3709                          */
3710                         node = path->nodes[i];
3711                         path->slots[i]++;
3712                         *level = i;
3713                         WARN_ON(*level == 0);
3714                         btrfs_node_key(node, &disk_key, path->slots[i]);
3715                         memcpy(&root_item->drop_progress,
3716                                &disk_key, sizeof(disk_key));
3717                         root_item->drop_level = i;
3718                         return 0;
3719                 } else {
3720                         struct extent_buffer *parent;
3721
3722                         /*
3723                          * this whole node is done, free our reference
3724                          * on it and go up one level
3725                          */
3726                         if (path->nodes[*level] == root->node)
3727                                 parent = path->nodes[*level];
3728                         else
3729                                 parent = path->nodes[*level + 1];
3730
3731                         root_owner = btrfs_header_owner(parent);
3732                         root_gen = btrfs_header_generation(parent);
3733
3734                         clean_tree_block(trans, root, path->nodes[*level]);
3735                         ret = btrfs_free_extent(trans, root,
3736                                                 path->nodes[*level]->start,
3737                                                 path->nodes[*level]->len,
3738                                                 parent->start, root_owner,
3739                                                 root_gen, *level, 1);
3740                         BUG_ON(ret);
3741                         if (path->locks[*level]) {
3742                                 btrfs_tree_unlock(path->nodes[*level]);
3743                                 path->locks[*level] = 0;
3744                         }
3745                         free_extent_buffer(path->nodes[*level]);
3746                         path->nodes[*level] = NULL;
3747                         *level = i + 1;
3748                 }
3749         }
3750         return 1;
3751 }
3752
3753 /*
3754  * drop the reference count on the tree rooted at 'snap'.  This traverses
3755  * the tree freeing any blocks that have a ref count of zero after being
3756  * decremented.
3757  */
3758 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3759                         *root)
3760 {
3761         int ret = 0;
3762         int wret;
3763         int level;
3764         struct btrfs_path *path;
3765         int i;
3766         int orig_level;
3767         int update_count;
3768         struct btrfs_root_item *root_item = &root->root_item;
3769
3770         WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
3771         path = btrfs_alloc_path();
3772         BUG_ON(!path);
3773
3774         level = btrfs_header_level(root->node);
3775         orig_level = level;
3776         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3777                 path->nodes[level] = root->node;
3778                 extent_buffer_get(root->node);
3779                 path->slots[level] = 0;
3780         } else {
3781                 struct btrfs_key key;
3782                 struct btrfs_disk_key found_key;
3783                 struct extent_buffer *node;
3784
3785                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3786                 level = root_item->drop_level;
3787                 path->lowest_level = level;
3788                 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3789                 if (wret < 0) {
3790                         ret = wret;
3791                         goto out;
3792                 }
3793                 node = path->nodes[level];
3794                 btrfs_node_key(node, &found_key, path->slots[level]);
3795                 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3796                                sizeof(found_key)));
3797                 /*
3798                  * unlock our path, this is safe because only this
3799                  * function is allowed to delete this snapshot
3800                  */
3801                 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3802                         if (path->nodes[i] && path->locks[i]) {
3803                                 path->locks[i] = 0;
3804                                 btrfs_tree_unlock(path->nodes[i]);
3805                         }
3806                 }
3807         }
3808         while (1) {
3809                 unsigned long update;
3810                 wret = walk_down_tree(trans, root, path, &level);
3811                 if (wret > 0)
3812                         break;
3813                 if (wret < 0)
3814                         ret = wret;
3815
3816                 wret = walk_up_tree(trans, root, path, &level,
3817                                     BTRFS_MAX_LEVEL);
3818                 if (wret > 0)
3819                         break;
3820                 if (wret < 0)
3821                         ret = wret;
3822                 if (trans->transaction->in_commit ||
3823                     trans->transaction->delayed_refs.flushing) {
3824                         ret = -EAGAIN;
3825                         break;
3826                 }
3827                 atomic_inc(&root->fs_info->throttle_gen);
3828                 wake_up(&root->fs_info->transaction_throttle);
3829                 for (update_count = 0; update_count < 16; update_count++) {
3830                         update = trans->delayed_ref_updates;
3831                         trans->delayed_ref_updates = 0;
3832                         if (update)
3833                                 btrfs_run_delayed_refs(trans, root, update);
3834                         else
3835                                 break;
3836                 }
3837         }
3838         for (i = 0; i <= orig_level; i++) {
3839                 if (path->nodes[i]) {
3840                         free_extent_buffer(path->nodes[i]);
3841                         path->nodes[i] = NULL;
3842                 }
3843         }
3844 out:
3845         btrfs_free_path(path);
3846         return ret;
3847 }
3848
3849 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
3850                         struct btrfs_root *root,
3851                         struct extent_buffer *node,
3852                         struct extent_buffer *parent)
3853 {
3854         struct btrfs_path *path;
3855         int level;
3856         int parent_level;
3857         int ret = 0;
3858         int wret;
3859
3860         path = btrfs_alloc_path();
3861         BUG_ON(!path);
3862
3863         btrfs_assert_tree_locked(parent);
3864         parent_level = btrfs_header_level(parent);
3865         extent_buffer_get(parent);
3866         path->nodes[parent_level] = parent;
3867         path->slots[parent_level] = btrfs_header_nritems(parent);
3868
3869         btrfs_assert_tree_locked(node);
3870         level = btrfs_header_level(node);
3871         extent_buffer_get(node);
3872         path->nodes[level] = node;
3873         path->slots[level] = 0;
3874
3875         while (1) {
3876                 wret = walk_down_subtree(trans, root, path, &level);
3877                 if (wret < 0)
3878                         ret = wret;
3879                 if (wret != 0)
3880                         break;
3881
3882                 wret = walk_up_tree(trans, root, path, &level, parent_level);
3883                 if (wret < 0)
3884                         ret = wret;
3885                 if (wret != 0)
3886                         break;
3887         }
3888
3889         btrfs_free_path(path);
3890         return ret;
3891 }
3892
3893 static unsigned long calc_ra(unsigned long start, unsigned long last,
3894                              unsigned long nr)
3895 {
3896         return min(last, start + nr - 1);
3897 }
3898
3899 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
3900                                          u64 len)
3901 {
3902         u64 page_start;
3903         u64 page_end;
3904         unsigned long first_index;
3905         unsigned long last_index;
3906         unsigned long i;
3907         struct page *page;
3908         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3909         struct file_ra_state *ra;
3910         struct btrfs_ordered_extent *ordered;
3911         unsigned int total_read = 0;
3912         unsigned int total_dirty = 0;
3913         int ret = 0;
3914
3915         ra = kzalloc(sizeof(*ra), GFP_NOFS);
3916
3917         mutex_lock(&inode->i_mutex);
3918         first_index = start >> PAGE_CACHE_SHIFT;
3919         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
3920
3921         /* make sure the dirty trick played by the caller work */
3922         ret = invalidate_inode_pages2_range(inode->i_mapping,
3923                                             first_index, last_index);
3924         if (ret)
3925                 goto out_unlock;
3926
3927         file_ra_state_init(ra, inode->i_mapping);
3928
3929         for (i = first_index ; i <= last_index; i++) {
3930                 if (total_read % ra->ra_pages == 0) {
3931                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
3932                                        calc_ra(i, last_index, ra->ra_pages));
3933                 }
3934                 total_read++;
3935 again:
3936                 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
3937                         BUG_ON(1);
3938                 page = grab_cache_page(inode->i_mapping, i);
3939                 if (!page) {
3940                         ret = -ENOMEM;
3941                         goto out_unlock;
3942                 }
3943                 if (!PageUptodate(page)) {
3944                         btrfs_readpage(NULL, page);
3945                         lock_page(page);
3946                         if (!PageUptodate(page)) {
3947                                 unlock_page(page);
3948                                 page_cache_release(page);
3949                                 ret = -EIO;
3950                                 goto out_unlock;
3951                         }
3952                 }
3953                 wait_on_page_writeback(page);
3954
3955                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
3956                 page_end = page_start + PAGE_CACHE_SIZE - 1;
3957                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3958
3959                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3960                 if (ordered) {
3961                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3962                         unlock_page(page);
3963                         page_cache_release(page);
3964                         btrfs_start_ordered_extent(inode, ordered, 1);
3965                         btrfs_put_ordered_extent(ordered);
3966                         goto again;
3967                 }
3968                 set_page_extent_mapped(page);
3969
3970                 if (i == first_index)
3971                         set_extent_bits(io_tree, page_start, page_end,
3972                                         EXTENT_BOUNDARY, GFP_NOFS);
3973                 btrfs_set_extent_delalloc(inode, page_start, page_end);
3974
3975                 set_page_dirty(page);
3976                 total_dirty++;
3977
3978                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3979                 unlock_page(page);
3980                 page_cache_release(page);
3981         }
3982
3983 out_unlock:
3984         kfree(ra);
3985         mutex_unlock(&inode->i_mutex);
3986         balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
3987         return ret;
3988 }
3989
3990 static noinline int relocate_data_extent(struct inode *reloc_inode,
3991                                          struct btrfs_key *extent_key,
3992                                          u64 offset)
3993 {
3994         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
3995         struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
3996         struct extent_map *em;
3997         u64 start = extent_key->objectid - offset;
3998         u64 end = start + extent_key->offset - 1;
3999
4000         em = alloc_extent_map(GFP_NOFS);
4001         BUG_ON(!em || IS_ERR(em));
4002
4003         em->start = start;
4004         em->len = extent_key->offset;
4005         em->block_len = extent_key->offset;
4006         em->block_start = extent_key->objectid;
4007         em->bdev = root->fs_info->fs_devices->latest_bdev;
4008         set_bit(EXTENT_FLAG_PINNED, &em->flags);
4009
4010         /* setup extent map to cheat btrfs_readpage */
4011         lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4012         while (1) {
4013                 int ret;
4014                 spin_lock(&em_tree->lock);
4015                 ret = add_extent_mapping(em_tree, em);
4016                 spin_unlock(&em_tree->lock);
4017                 if (ret != -EEXIST) {
4018                         free_extent_map(em);
4019                         break;
4020                 }
4021                 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
4022         }
4023         unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4024
4025         return relocate_inode_pages(reloc_inode, start, extent_key->offset);
4026 }
4027
4028 struct btrfs_ref_path {
4029         u64 extent_start;
4030         u64 nodes[BTRFS_MAX_LEVEL];
4031         u64 root_objectid;
4032         u64 root_generation;
4033         u64 owner_objectid;
4034         u32 num_refs;
4035         int lowest_level;
4036         int current_level;
4037         int shared_level;
4038
4039         struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
4040         u64 new_nodes[BTRFS_MAX_LEVEL];
4041 };
4042
4043 struct disk_extent {
4044         u64 ram_bytes;
4045         u64 disk_bytenr;
4046         u64 disk_num_bytes;
4047         u64 offset;
4048         u64 num_bytes;
4049         u8 compression;
4050         u8 encryption;
4051         u16 other_encoding;
4052 };
4053
4054 static int is_cowonly_root(u64 root_objectid)
4055 {
4056         if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
4057             root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
4058             root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
4059             root_objectid == BTRFS_DEV_TREE_OBJECTID ||
4060             root_objectid == BTRFS_TREE_LOG_OBJECTID ||
4061             root_objectid == BTRFS_CSUM_TREE_OBJECTID)
4062                 return 1;
4063         return 0;
4064 }
4065
4066 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
4067                                     struct btrfs_root *extent_root,
4068                                     struct btrfs_ref_path *ref_path,
4069                                     int first_time)
4070 {
4071         struct extent_buffer *leaf;
4072         struct btrfs_path *path;
4073         struct btrfs_extent_ref *ref;
4074         struct btrfs_key key;
4075         struct btrfs_key found_key;
4076         u64 bytenr;
4077         u32 nritems;
4078         int level;
4079         int ret = 1;
4080
4081         path = btrfs_alloc_path();
4082         if (!path)
4083                 return -ENOMEM;
4084
4085         if (first_time) {
4086                 ref_path->lowest_level = -1;
4087                 ref_path->current_level = -1;
4088                 ref_path->shared_level = -1;
4089                 goto walk_up;
4090         }
4091 walk_down:
4092         level = ref_path->current_level - 1;
4093         while (level >= -1) {
4094                 u64 parent;
4095                 if (level < ref_path->lowest_level)
4096                         break;
4097
4098                 if (level >= 0)
4099                         bytenr = ref_path->nodes[level];
4100                 else
4101                         bytenr = ref_path->extent_start;
4102                 BUG_ON(bytenr == 0);
4103
4104                 parent = ref_path->nodes[level + 1];
4105                 ref_path->nodes[level + 1] = 0;
4106                 ref_path->current_level = level;
4107                 BUG_ON(parent == 0);
4108
4109                 key.objectid = bytenr;
4110                 key.offset = parent + 1;
4111                 key.type = BTRFS_EXTENT_REF_KEY;
4112
4113                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4114                 if (ret < 0)
4115                         goto out;
4116                 BUG_ON(ret == 0);
4117
4118                 leaf = path->nodes[0];
4119                 nritems = btrfs_header_nritems(leaf);
4120                 if (path->slots[0] >= nritems) {
4121                         ret = btrfs_next_leaf(extent_root, path);
4122                         if (ret < 0)
4123                                 goto out;
4124                         if (ret > 0)
4125                                 goto next;
4126                         leaf = path->nodes[0];
4127                 }
4128
4129                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4130                 if (found_key.objectid == bytenr &&
4131                     found_key.type == BTRFS_EXTENT_REF_KEY) {
4132                         if (level < ref_path->shared_level)
4133                                 ref_path->shared_level = level;
4134                         goto found;
4135                 }
4136 next:
4137                 level--;
4138                 btrfs_release_path(extent_root, path);
4139                 cond_resched();
4140         }
4141         /* reached lowest level */
4142         ret = 1;
4143         goto out;
4144 walk_up:
4145         level = ref_path->current_level;
4146         while (level < BTRFS_MAX_LEVEL - 1) {
4147                 u64 ref_objectid;
4148
4149                 if (level >= 0)
4150                         bytenr = ref_path->nodes[level];
4151                 else
4152                         bytenr = ref_path->extent_start;
4153
4154                 BUG_ON(bytenr == 0);
4155
4156                 key.objectid = bytenr;
4157                 key.offset = 0;
4158                 key.type = BTRFS_EXTENT_REF_KEY;
4159
4160                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4161                 if (ret < 0)
4162                         goto out;
4163
4164                 leaf = path->nodes[0];
4165                 nritems = btrfs_header_nritems(leaf);
4166                 if (path->slots[0] >= nritems) {
4167                         ret = btrfs_next_leaf(extent_root, path);
4168                         if (ret < 0)
4169                                 goto out;
4170                         if (ret > 0) {
4171                                 /* the extent was freed by someone */
4172                                 if (ref_path->lowest_level == level)
4173                                         goto out;
4174                                 btrfs_release_path(extent_root, path);
4175                                 goto walk_down;
4176                         }
4177                         leaf = path->nodes[0];
4178                 }
4179
4180                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4181                 if (found_key.objectid != bytenr ||
4182                                 found_key.type != BTRFS_EXTENT_REF_KEY) {
4183                         /* the extent was freed by someone */
4184                         if (ref_path->lowest_level == level) {
4185                                 ret = 1;
4186                                 goto out;
4187                         }
4188                         btrfs_release_path(extent_root, path);
4189                         goto walk_down;
4190                 }
4191 found:
4192                 ref = btrfs_item_ptr(leaf, path->slots[0],
4193                                 struct btrfs_extent_ref);
4194                 ref_objectid = btrfs_ref_objectid(leaf, ref);
4195                 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4196                         if (first_time) {
4197                                 level = (int)ref_objectid;
4198                                 BUG_ON(level >= BTRFS_MAX_LEVEL);
4199                                 ref_path->lowest_level = level;
4200                                 ref_path->current_level = level;
4201                                 ref_path->nodes[level] = bytenr;
4202                         } else {
4203                                 WARN_ON(ref_objectid != level);
4204                         }
4205                 } else {
4206                         WARN_ON(level != -1);
4207                 }
4208                 first_time = 0;
4209
4210                 if (ref_path->lowest_level == level) {
4211                         ref_path->owner_objectid = ref_objectid;
4212                         ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
4213                 }
4214
4215                 /*
4216                  * the block is tree root or the block isn't in reference
4217                  * counted tree.
4218                  */
4219                 if (found_key.objectid == found_key.offset ||
4220                     is_cowonly_root(btrfs_ref_root(leaf, ref))) {
4221                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4222                         ref_path->root_generation =
4223                                 btrfs_ref_generation(leaf, ref);
4224                         if (level < 0) {
4225                                 /* special reference from the tree log */
4226                                 ref_path->nodes[0] = found_key.offset;
4227                                 ref_path->current_level = 0;
4228                         }
4229                         ret = 0;
4230                         goto out;
4231                 }
4232
4233                 level++;
4234                 BUG_ON(ref_path->nodes[level] != 0);
4235                 ref_path->nodes[level] = found_key.offset;
4236                 ref_path->current_level = level;
4237
4238                 /*
4239                  * the reference was created in the running transaction,
4240                  * no need to continue walking up.
4241                  */
4242                 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
4243                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4244                         ref_path->root_generation =
4245                                 btrfs_ref_generation(leaf, ref);
4246                         ret = 0;
4247                         goto out;
4248                 }
4249
4250                 btrfs_release_path(extent_root, path);
4251                 cond_resched();
4252         }
4253         /* reached max tree level, but no tree root found. */
4254         BUG();
4255 out:
4256         btrfs_free_path(path);
4257         return ret;
4258 }
4259
4260 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
4261                                 struct btrfs_root *extent_root,
4262                                 struct btrfs_ref_path *ref_path,
4263                                 u64 extent_start)
4264 {
4265         memset(ref_path, 0, sizeof(*ref_path));
4266         ref_path->extent_start = extent_start;
4267
4268         return __next_ref_path(trans, extent_root, ref_path, 1);
4269 }
4270
4271 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
4272                                struct btrfs_root *extent_root,
4273                                struct btrfs_ref_path *ref_path)
4274 {
4275         return __next_ref_path(trans, extent_root, ref_path, 0);
4276 }
4277
4278 static noinline int get_new_locations(struct inode *reloc_inode,
4279                                       struct btrfs_key *extent_key,
4280                                       u64 offset, int no_fragment,
4281                                       struct disk_extent **extents,
4282                                       int *nr_extents)
4283 {
4284         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4285         struct btrfs_path *path;
4286         struct btrfs_file_extent_item *fi;
4287         struct extent_buffer *leaf;
4288         struct disk_extent *exts = *extents;
4289         struct btrfs_key found_key;
4290         u64 cur_pos;
4291         u64 last_byte;
4292         u32 nritems;
4293         int nr = 0;
4294         int max = *nr_extents;
4295         int ret;
4296
4297         WARN_ON(!no_fragment && *extents);
4298         if (!exts) {
4299                 max = 1;
4300                 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
4301                 if (!exts)
4302                         return -ENOMEM;
4303         }
4304
4305         path = btrfs_alloc_path();
4306         BUG_ON(!path);
4307
4308         cur_pos = extent_key->objectid - offset;
4309         last_byte = extent_key->objectid + extent_key->offset;
4310         ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
4311                                        cur_pos, 0);
4312         if (ret < 0)
4313                 goto out;
4314         if (ret > 0) {
4315                 ret = -ENOENT;
4316                 goto out;
4317         }
4318
4319         while (1) {
4320                 leaf = path->nodes[0];
4321                 nritems = btrfs_header_nritems(leaf);
4322                 if (path->slots[0] >= nritems) {
4323                         ret = btrfs_next_leaf(root, path);
4324                         if (ret < 0)
4325                                 goto out;
4326                         if (ret > 0)
4327                                 break;
4328                         leaf = path->nodes[0];
4329                 }
4330
4331                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4332                 if (found_key.offset != cur_pos ||
4333                     found_key.type != BTRFS_EXTENT_DATA_KEY ||
4334                     found_key.objectid != reloc_inode->i_ino)
4335                         break;
4336
4337                 fi = btrfs_item_ptr(leaf, path->slots[0],
4338                                     struct btrfs_file_extent_item);
4339                 if (btrfs_file_extent_type(leaf, fi) !=
4340                     BTRFS_FILE_EXTENT_REG ||
4341                     btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4342                         break;
4343
4344                 if (nr == max) {
4345                         struct disk_extent *old = exts;
4346                         max *= 2;
4347                         exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
4348                         memcpy(exts, old, sizeof(*exts) * nr);
4349                         if (old != *extents)
4350                                 kfree(old);
4351                 }
4352
4353                 exts[nr].disk_bytenr =
4354                         btrfs_file_extent_disk_bytenr(leaf, fi);
4355                 exts[nr].disk_num_bytes =
4356                         btrfs_file_extent_disk_num_bytes(leaf, fi);
4357                 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
4358                 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4359                 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
4360                 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
4361                 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
4362                 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
4363                                                                            fi);
4364                 BUG_ON(exts[nr].offset > 0);
4365                 BUG_ON(exts[nr].compression || exts[nr].encryption);
4366                 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
4367
4368                 cur_pos += exts[nr].num_bytes;
4369                 nr++;
4370
4371                 if (cur_pos + offset >= last_byte)
4372                         break;
4373
4374                 if (no_fragment) {
4375                         ret = 1;
4376                         goto out;
4377                 }
4378                 path->slots[0]++;
4379         }
4380
4381         BUG_ON(cur_pos + offset > last_byte);
4382         if (cur_pos + offset < last_byte) {
4383                 ret = -ENOENT;
4384                 goto out;
4385         }
4386         ret = 0;
4387 out:
4388         btrfs_free_path(path);
4389         if (ret) {
4390                 if (exts != *extents)
4391                         kfree(exts);
4392         } else {
4393                 *extents = exts;
4394                 *nr_extents = nr;
4395         }
4396         return ret;
4397 }
4398
4399 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4400                                         struct btrfs_root *root,
4401                                         struct btrfs_path *path,
4402                                         struct btrfs_key *extent_key,
4403                                         struct btrfs_key *leaf_key,
4404                                         struct btrfs_ref_path *ref_path,
4405                                         struct disk_extent *new_extents,
4406                                         int nr_extents)
4407 {
4408         struct extent_buffer *leaf;
4409         struct btrfs_file_extent_item *fi;
4410         struct inode *inode = NULL;
4411         struct btrfs_key key;
4412         u64 lock_start = 0;
4413         u64 lock_end = 0;
4414         u64 num_bytes;
4415         u64 ext_offset;
4416         u64 search_end = (u64)-1;
4417         u32 nritems;
4418         int nr_scaned = 0;
4419         int extent_locked = 0;
4420         int extent_type;
4421         int ret;
4422
4423         memcpy(&key, leaf_key, sizeof(key));
4424         if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4425                 if (key.objectid < ref_path->owner_objectid ||
4426                     (key.objectid == ref_path->owner_objectid &&
4427                      key.type < BTRFS_EXTENT_DATA_KEY)) {
4428                         key.objectid = ref_path->owner_objectid;
4429                         key.type = BTRFS_EXTENT_DATA_KEY;
4430                         key.offset = 0;
4431                 }
4432         }
4433
4434         while (1) {
4435                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4436                 if (ret < 0)
4437                         goto out;
4438
4439                 leaf = path->nodes[0];
4440                 nritems = btrfs_header_nritems(leaf);
4441 next:
4442                 if (extent_locked && ret > 0) {
4443                         /*
4444                          * the file extent item was modified by someone
4445                          * before the extent got locked.
4446                          */
4447                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4448                                       lock_end, GFP_NOFS);
4449                         extent_locked = 0;
4450                 }
4451
4452                 if (path->slots[0] >= nritems) {
4453                         if (++nr_scaned > 2)
4454                                 break;
4455
4456                         BUG_ON(extent_locked);
4457                         ret = btrfs_next_leaf(root, path);
4458                         if (ret < 0)
4459                                 goto out;
4460                         if (ret > 0)
4461                                 break;
4462                         leaf = path->nodes[0];
4463                         nritems = btrfs_header_nritems(leaf);
4464                 }
4465
4466                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4467
4468                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4469                         if ((key.objectid > ref_path->owner_objectid) ||
4470                             (key.objectid == ref_path->owner_objectid &&
4471                              key.type > BTRFS_EXTENT_DATA_KEY) ||
4472                             key.offset >= search_end)
4473                                 break;
4474                 }
4475
4476                 if (inode && key.objectid != inode->i_ino) {
4477                         BUG_ON(extent_locked);
4478                         btrfs_release_path(root, path);
4479                         mutex_unlock(&inode->i_mutex);
4480                         iput(inode);
4481                         inode = NULL;
4482                         continue;
4483                 }
4484
4485                 if (key.type != BTRFS_EXTENT_DATA_KEY) {
4486                         path->slots[0]++;
4487                         ret = 1;
4488                         goto next;
4489                 }
4490                 fi = btrfs_item_ptr(leaf, path->slots[0],
4491                                     struct btrfs_file_extent_item);
4492                 extent_type = btrfs_file_extent_type(leaf, fi);
4493                 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
4494                      extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
4495                     (btrfs_file_extent_disk_bytenr(leaf, fi) !=
4496                      extent_key->objectid)) {
4497                         path->slots[0]++;
4498                         ret = 1;
4499                         goto next;
4500                 }
4501
4502                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4503                 ext_offset = btrfs_file_extent_offset(leaf, fi);
4504
4505                 if (search_end == (u64)-1) {
4506                         search_end = key.offset - ext_offset +
4507                                 btrfs_file_extent_ram_bytes(leaf, fi);
4508                 }
4509
4510                 if (!extent_locked) {
4511                         lock_start = key.offset;
4512                         lock_end = lock_start + num_bytes - 1;
4513                 } else {
4514                         if (lock_start > key.offset ||
4515                             lock_end + 1 < key.offset + num_bytes) {
4516                                 unlock_extent(&BTRFS_I(inode)->io_tree,
4517                                               lock_start, lock_end, GFP_NOFS);
4518                                 extent_locked = 0;
4519                         }
4520                 }
4521
4522                 if (!inode) {
4523                         btrfs_release_path(root, path);
4524
4525                         inode = btrfs_iget_locked(root->fs_info->sb,
4526                                                   key.objectid, root);
4527                         if (inode->i_state & I_NEW) {
4528                                 BTRFS_I(inode)->root = root;
4529                                 BTRFS_I(inode)->location.objectid =
4530                                         key.objectid;
4531                                 BTRFS_I(inode)->location.type =
4532                                         BTRFS_INODE_ITEM_KEY;
4533                                 BTRFS_I(inode)->location.offset = 0;
4534                                 btrfs_read_locked_inode(inode);
4535                                 unlock_new_inode(inode);
4536                         }
4537                         /*
4538                          * some code call btrfs_commit_transaction while
4539                          * holding the i_mutex, so we can't use mutex_lock
4540                          * here.
4541                          */
4542                         if (is_bad_inode(inode) ||
4543                             !mutex_trylock(&inode->i_mutex)) {
4544                                 iput(inode);
4545                                 inode = NULL;
4546                                 key.offset = (u64)-1;
4547                                 goto skip;
4548                         }
4549                 }
4550
4551                 if (!extent_locked) {
4552                         struct btrfs_ordered_extent *ordered;
4553
4554                         btrfs_release_path(root, path);
4555
4556                         lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4557                                     lock_end, GFP_NOFS);
4558                         ordered = btrfs_lookup_first_ordered_extent(inode,
4559                                                                     lock_end);
4560                         if (ordered &&
4561                             ordered->file_offset <= lock_end &&
4562                             ordered->file_offset + ordered->len > lock_start) {
4563                                 unlock_extent(&BTRFS_I(inode)->io_tree,
4564                                               lock_start, lock_end, GFP_NOFS);
4565                                 btrfs_start_ordered_extent(inode, ordered, 1);
4566                                 btrfs_put_ordered_extent(ordered);
4567                                 key.offset += num_bytes;
4568                                 goto skip;
4569                         }
4570                         if (ordered)
4571                                 btrfs_put_ordered_extent(ordered);
4572
4573                         extent_locked = 1;
4574                         continue;
4575                 }
4576
4577                 if (nr_extents == 1) {
4578                         /* update extent pointer in place */
4579                         btrfs_set_file_extent_disk_bytenr(leaf, fi,
4580                                                 new_extents[0].disk_bytenr);
4581                         btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4582                                                 new_extents[0].disk_num_bytes);
4583                         btrfs_mark_buffer_dirty(leaf);
4584
4585                         btrfs_drop_extent_cache(inode, key.offset,
4586                                                 key.offset + num_bytes - 1, 0);
4587
4588                         ret = btrfs_inc_extent_ref(trans, root,
4589                                                 new_extents[0].disk_bytenr,
4590                                                 new_extents[0].disk_num_bytes,
4591                                                 leaf->start,
4592                                                 root->root_key.objectid,
4593                                                 trans->transid,
4594                                                 key.objectid);
4595                         BUG_ON(ret);
4596
4597                         ret = btrfs_free_extent(trans, root,
4598                                                 extent_key->objectid,
4599                                                 extent_key->offset,
4600                                                 leaf->start,
4601                                                 btrfs_header_owner(leaf),
4602                                                 btrfs_header_generation(leaf),
4603                                                 key.objectid, 0);
4604                         BUG_ON(ret);
4605
4606                         btrfs_release_path(root, path);
4607                         key.offset += num_bytes;
4608                 } else {
4609                         BUG_ON(1);
4610 #if 0
4611                         u64 alloc_hint;
4612                         u64 extent_len;
4613                         int i;
4614                         /*
4615                          * drop old extent pointer at first, then insert the
4616                          * new pointers one bye one
4617                          */
4618                         btrfs_release_path(root, path);
4619                         ret = btrfs_drop_extents(trans, root, inode, key.offset,
4620                                                  key.offset + num_bytes,
4621                                                  key.offset, &alloc_hint);
4622                         BUG_ON(ret);
4623
4624                         for (i = 0; i < nr_extents; i++) {
4625                                 if (ext_offset >= new_extents[i].num_bytes) {
4626                                         ext_offset -= new_extents[i].num_bytes;
4627                                         continue;
4628                                 }
4629                                 extent_len = min(new_extents[i].num_bytes -
4630                                                  ext_offset, num_bytes);
4631
4632                                 ret = btrfs_insert_empty_item(trans, root,
4633                                                               path, &key,
4634                                                               sizeof(*fi));
4635                                 BUG_ON(ret);
4636
4637                                 leaf = path->nodes[0];
4638                                 fi = btrfs_item_ptr(leaf, path->slots[0],
4639                                                 struct btrfs_file_extent_item);
4640                                 btrfs_set_file_extent_generation(leaf, fi,
4641                                                         trans->transid);
4642                                 btrfs_set_file_extent_type(leaf, fi,
4643                                                         BTRFS_FILE_EXTENT_REG);
4644                                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4645                                                 new_extents[i].disk_bytenr);
4646                                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4647                                                 new_extents[i].disk_num_bytes);
4648                                 btrfs_set_file_extent_ram_bytes(leaf, fi,
4649                                                 new_extents[i].ram_bytes);
4650
4651                                 btrfs_set_file_extent_compression(leaf, fi,
4652                                                 new_extents[i].compression);
4653                                 btrfs_set_file_extent_encryption(leaf, fi,
4654                                                 new_extents[i].encryption);
4655                                 btrfs_set_file_extent_other_encoding(leaf, fi,
4656                                                 new_extents[i].other_encoding);
4657
4658                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4659                                                         extent_len);
4660                                 ext_offset += new_extents[i].offset;
4661                                 btrfs_set_file_extent_offset(leaf, fi,
4662                                                         ext_offset);
4663                                 btrfs_mark_buffer_dirty(leaf);
4664
4665                                 btrfs_drop_extent_cache(inode, key.offset,
4666                                                 key.offset + extent_len - 1, 0);
4667
4668                                 ret = btrfs_inc_extent_ref(trans, root,
4669                                                 new_extents[i].disk_bytenr,
4670                                                 new_extents[i].disk_num_bytes,
4671                                                 leaf->start,
4672                                                 root->root_key.objectid,
4673                                                 trans->transid, key.objectid);
4674                                 BUG_ON(ret);
4675                                 btrfs_release_path(root, path);
4676
4677                                 inode_add_bytes(inode, extent_len);
4678
4679                                 ext_offset = 0;
4680                                 num_bytes -= extent_len;
4681                                 key.offset += extent_len;
4682
4683                                 if (num_bytes == 0)
4684                                         break;
4685                         }
4686                         BUG_ON(i >= nr_extents);
4687 #endif
4688                 }
4689
4690                 if (extent_locked) {
4691                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4692                                       lock_end, GFP_NOFS);
4693                         extent_locked = 0;
4694                 }
4695 skip:
4696                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
4697                     key.offset >= search_end)
4698                         break;
4699
4700                 cond_resched();
4701         }
4702         ret = 0;
4703 out:
4704         btrfs_release_path(root, path);
4705         if (inode) {
4706                 mutex_unlock(&inode->i_mutex);
4707                 if (extent_locked) {
4708                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4709                                       lock_end, GFP_NOFS);
4710                 }
4711                 iput(inode);
4712         }
4713         return ret;
4714 }
4715
4716 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4717                                struct btrfs_root *root,
4718                                struct extent_buffer *buf, u64 orig_start)
4719 {
4720         int level;
4721         int ret;
4722
4723         BUG_ON(btrfs_header_generation(buf) != trans->transid);
4724         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
4725
4726         level = btrfs_header_level(buf);
4727         if (level == 0) {
4728                 struct btrfs_leaf_ref *ref;
4729                 struct btrfs_leaf_ref *orig_ref;
4730
4731                 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
4732                 if (!orig_ref)
4733                         return -ENOENT;
4734
4735                 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
4736                 if (!ref) {
4737                         btrfs_free_leaf_ref(root, orig_ref);
4738                         return -ENOMEM;
4739                 }
4740
4741                 ref->nritems = orig_ref->nritems;
4742                 memcpy(ref->extents, orig_ref->extents,
4743                         sizeof(ref->extents[0]) * ref->nritems);
4744
4745                 btrfs_free_leaf_ref(root, orig_ref);
4746
4747                 ref->root_gen = trans->transid;
4748                 ref->bytenr = buf->start;
4749                 ref->owner = btrfs_header_owner(buf);
4750                 ref->generation = btrfs_header_generation(buf);
4751
4752                 ret = btrfs_add_leaf_ref(root, ref, 0);
4753                 WARN_ON(ret);
4754                 btrfs_free_leaf_ref(root, ref);
4755         }
4756         return 0;
4757 }
4758
4759 static noinline int invalidate_extent_cache(struct btrfs_root *root,
4760                                         struct extent_buffer *leaf,
4761                                         struct btrfs_block_group_cache *group,
4762                                         struct btrfs_root *target_root)
4763 {
4764         struct btrfs_key key;
4765         struct inode *inode = NULL;
4766         struct btrfs_file_extent_item *fi;
4767         u64 num_bytes;
4768         u64 skip_objectid = 0;
4769         u32 nritems;
4770         u32 i;
4771
4772         nritems = btrfs_header_nritems(leaf);
4773         for (i = 0; i < nritems; i++) {
4774                 btrfs_item_key_to_cpu(leaf, &key, i);
4775                 if (key.objectid == skip_objectid ||
4776                     key.type != BTRFS_EXTENT_DATA_KEY)
4777                         continue;
4778                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4779                 if (btrfs_file_extent_type(leaf, fi) ==
4780                     BTRFS_FILE_EXTENT_INLINE)
4781                         continue;
4782                 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4783                         continue;
4784                 if (!inode || inode->i_ino != key.objectid) {
4785                         iput(inode);
4786                         inode = btrfs_ilookup(target_root->fs_info->sb,
4787                                               key.objectid, target_root, 1);
4788                 }
4789                 if (!inode) {
4790                         skip_objectid = key.objectid;
4791                         continue;
4792                 }
4793                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4794
4795                 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4796                             key.offset + num_bytes - 1, GFP_NOFS);
4797                 btrfs_drop_extent_cache(inode, key.offset,
4798                                         key.offset + num_bytes - 1, 1);
4799                 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4800                               key.offset + num_bytes - 1, GFP_NOFS);
4801                 cond_resched();
4802         }
4803         iput(inode);
4804         return 0;
4805 }
4806
4807 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
4808                                         struct btrfs_root *root,
4809                                         struct extent_buffer *leaf,
4810                                         struct btrfs_block_group_cache *group,
4811                                         struct inode *reloc_inode)
4812 {
4813         struct btrfs_key key;
4814         struct btrfs_key extent_key;
4815         struct btrfs_file_extent_item *fi;
4816         struct btrfs_leaf_ref *ref;
4817         struct disk_extent *new_extent;
4818         u64 bytenr;
4819         u64 num_bytes;
4820         u32 nritems;
4821         u32 i;
4822         int ext_index;
4823         int nr_extent;
4824         int ret;
4825
4826         new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
4827         BUG_ON(!new_extent);
4828
4829         ref = btrfs_lookup_leaf_ref(root, leaf->start);
4830         BUG_ON(!ref);
4831
4832         ext_index = -1;
4833         nritems = btrfs_header_nritems(leaf);
4834         for (i = 0; i < nritems; i++) {
4835                 btrfs_item_key_to_cpu(leaf, &key, i);
4836                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4837                         continue;
4838                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4839                 if (btrfs_file_extent_type(leaf, fi) ==
4840                     BTRFS_FILE_EXTENT_INLINE)
4841                         continue;
4842                 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4843                 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4844                 if (bytenr == 0)
4845                         continue;
4846
4847                 ext_index++;
4848                 if (bytenr >= group->key.objectid + group->key.offset ||
4849                     bytenr + num_bytes <= group->key.objectid)
4850                         continue;
4851
4852                 extent_key.objectid = bytenr;
4853                 extent_key.offset = num_bytes;
4854                 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4855                 nr_extent = 1;
4856                 ret = get_new_locations(reloc_inode, &extent_key,
4857                                         group->key.objectid, 1,
4858                                         &new_extent, &nr_extent);
4859                 if (ret > 0)
4860                         continue;
4861                 BUG_ON(ret < 0);
4862
4863                 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
4864                 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
4865                 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
4866                 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
4867
4868                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4869                                                 new_extent->disk_bytenr);
4870                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4871                                                 new_extent->disk_num_bytes);
4872                 btrfs_mark_buffer_dirty(leaf);
4873
4874                 ret = btrfs_inc_extent_ref(trans, root,
4875                                         new_extent->disk_bytenr,
4876                                         new_extent->disk_num_bytes,
4877                                         leaf->start,
4878                                         root->root_key.objectid,
4879                                         trans->transid, key.objectid);
4880                 BUG_ON(ret);
4881
4882                 ret = btrfs_free_extent(trans, root,
4883                                         bytenr, num_bytes, leaf->start,
4884                                         btrfs_header_owner(leaf),
4885                                         btrfs_header_generation(leaf),
4886                                         key.objectid, 0);
4887                 BUG_ON(ret);
4888                 cond_resched();
4889         }
4890         kfree(new_extent);
4891         BUG_ON(ext_index + 1 != ref->nritems);
4892         btrfs_free_leaf_ref(root, ref);
4893         return 0;
4894 }
4895
4896 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
4897                           struct btrfs_root *root)
4898 {
4899         struct btrfs_root *reloc_root;
4900         int ret;
4901
4902         if (root->reloc_root) {
4903                 reloc_root = root->reloc_root;
4904                 root->reloc_root = NULL;
4905                 list_add(&reloc_root->dead_list,
4906                          &root->fs_info->dead_reloc_roots);
4907
4908                 btrfs_set_root_bytenr(&reloc_root->root_item,
4909                                       reloc_root->node->start);
4910                 btrfs_set_root_level(&root->root_item,
4911                                      btrfs_header_level(reloc_root->node));
4912                 memset(&reloc_root->root_item.drop_progress, 0,
4913                         sizeof(struct btrfs_disk_key));
4914                 reloc_root->root_item.drop_level = 0;
4915
4916                 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4917                                         &reloc_root->root_key,
4918                                         &reloc_root->root_item);
4919                 BUG_ON(ret);
4920         }
4921         return 0;
4922 }
4923
4924 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
4925 {
4926         struct btrfs_trans_handle *trans;
4927         struct btrfs_root *reloc_root;
4928         struct btrfs_root *prev_root = NULL;
4929         struct list_head dead_roots;
4930         int ret;
4931         unsigned long nr;
4932
4933         INIT_LIST_HEAD(&dead_roots);
4934         list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
4935
4936         while (!list_empty(&dead_roots)) {
4937                 reloc_root = list_entry(dead_roots.prev,
4938                                         struct btrfs_root, dead_list);
4939                 list_del_init(&reloc_root->dead_list);
4940
4941                 BUG_ON(reloc_root->commit_root != NULL);
4942                 while (1) {
4943                         trans = btrfs_join_transaction(root, 1);
4944                         BUG_ON(!trans);
4945
4946                         mutex_lock(&root->fs_info->drop_mutex);
4947                         ret = btrfs_drop_snapshot(trans, reloc_root);
4948                         if (ret != -EAGAIN)
4949                                 break;
4950                         mutex_unlock(&root->fs_info->drop_mutex);
4951
4952                         nr = trans->blocks_used;
4953                         ret = btrfs_end_transaction(trans, root);
4954                         BUG_ON(ret);
4955                         btrfs_btree_balance_dirty(root, nr);
4956                 }
4957
4958                 free_extent_buffer(reloc_root->node);
4959
4960                 ret = btrfs_del_root(trans, root->fs_info->tree_root,
4961                                      &reloc_root->root_key);
4962                 BUG_ON(ret);
4963                 mutex_unlock(&root->fs_info->drop_mutex);
4964
4965                 nr = trans->blocks_used;
4966                 ret = btrfs_end_transaction(trans, root);
4967                 BUG_ON(ret);
4968                 btrfs_btree_balance_dirty(root, nr);
4969
4970                 kfree(prev_root);
4971                 prev_root = reloc_root;
4972         }
4973         if (prev_root) {
4974                 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
4975                 kfree(prev_root);
4976         }
4977         return 0;
4978 }
4979
4980 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
4981 {
4982         list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
4983         return 0;
4984 }
4985
4986 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
4987 {
4988         struct btrfs_root *reloc_root;
4989         struct btrfs_trans_handle *trans;
4990         struct btrfs_key location;
4991         int found;
4992         int ret;
4993
4994         mutex_lock(&root->fs_info->tree_reloc_mutex);
4995         ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
4996         BUG_ON(ret);
4997         found = !list_empty(&root->fs_info->dead_reloc_roots);
4998         mutex_unlock(&root->fs_info->tree_reloc_mutex);
4999
5000         if (found) {
5001                 trans = btrfs_start_transaction(root, 1);
5002                 BUG_ON(!trans);
5003                 ret = btrfs_commit_transaction(trans, root);
5004                 BUG_ON(ret);
5005         }
5006
5007         location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5008         location.offset = (u64)-1;
5009         location.type = BTRFS_ROOT_ITEM_KEY;
5010
5011         reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
5012         BUG_ON(!reloc_root);
5013         btrfs_orphan_cleanup(reloc_root);
5014         return 0;
5015 }
5016
5017 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
5018                                     struct btrfs_root *root)
5019 {
5020         struct btrfs_root *reloc_root;
5021         struct extent_buffer *eb;
5022         struct btrfs_root_item *root_item;
5023         struct btrfs_key root_key;
5024         int ret;
5025
5026         BUG_ON(!root->ref_cows);
5027         if (root->reloc_root)
5028                 return 0;
5029
5030         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
5031         BUG_ON(!root_item);
5032
5033         ret = btrfs_copy_root(trans, root, root->commit_root,
5034                               &eb, BTRFS_TREE_RELOC_OBJECTID);
5035         BUG_ON(ret);
5036
5037         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
5038         root_key.offset = root->root_key.objectid;
5039         root_key.type = BTRFS_ROOT_ITEM_KEY;
5040
5041         memcpy(root_item, &root->root_item, sizeof(root_item));
5042         btrfs_set_root_refs(root_item, 0);
5043         btrfs_set_root_bytenr(root_item, eb->start);
5044         btrfs_set_root_level(root_item, btrfs_header_level(eb));
5045         btrfs_set_root_generation(root_item, trans->transid);
5046
5047         btrfs_tree_unlock(eb);
5048         free_extent_buffer(eb);
5049
5050         ret = btrfs_insert_root(trans, root->fs_info->tree_root,
5051                                 &root_key, root_item);
5052         BUG_ON(ret);
5053         kfree(root_item);
5054
5055         reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
5056                                                  &root_key);
5057         BUG_ON(!reloc_root);
5058         reloc_root->last_trans = trans->transid;
5059         reloc_root->commit_root = NULL;
5060         reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
5061
5062         root->reloc_root = reloc_root;
5063         return 0;
5064 }
5065
5066 /*
5067  * Core function of space balance.
5068  *
5069  * The idea is using reloc trees to relocate tree blocks in reference
5070  * counted roots. There is one reloc tree for each subvol, and all
5071  * reloc trees share same root key objectid. Reloc trees are snapshots
5072  * of the latest committed roots of subvols (root->commit_root).
5073  *
5074  * To relocate a tree block referenced by a subvol, there are two steps.
5075  * COW the block through subvol's reloc tree, then update block pointer
5076  * in the subvol to point to the new block. Since all reloc trees share
5077  * same root key objectid, doing special handing for tree blocks owned
5078  * by them is easy. Once a tree block has been COWed in one reloc tree,
5079  * we can use the resulting new block directly when the same block is
5080  * required to COW again through other reloc trees. By this way, relocated
5081  * tree blocks are shared between reloc trees, so they are also shared
5082  * between subvols.
5083  */
5084 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
5085                                       struct btrfs_root *root,
5086                                       struct btrfs_path *path,
5087                                       struct btrfs_key *first_key,
5088                                       struct btrfs_ref_path *ref_path,
5089                                       struct btrfs_block_group_cache *group,
5090                                       struct inode *reloc_inode)
5091 {
5092         struct btrfs_root *reloc_root;
5093         struct extent_buffer *eb = NULL;
5094         struct btrfs_key *keys;
5095         u64 *nodes;
5096         int level;
5097         int shared_level;
5098         int lowest_level = 0;
5099         int ret;
5100
5101         if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
5102                 lowest_level = ref_path->owner_objectid;
5103
5104         if (!root->ref_cows) {
5105                 path->lowest_level = lowest_level;
5106                 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
5107                 BUG_ON(ret < 0);
5108                 path->lowest_level = 0;
5109                 btrfs_release_path(root, path);
5110                 return 0;
5111         }
5112
5113         mutex_lock(&root->fs_info->tree_reloc_mutex);
5114         ret = init_reloc_tree(trans, root);
5115         BUG_ON(ret);
5116         reloc_root = root->reloc_root;
5117
5118         shared_level = ref_path->shared_level;
5119         ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
5120
5121         keys = ref_path->node_keys;
5122         nodes = ref_path->new_nodes;
5123         memset(&keys[shared_level + 1], 0,
5124                sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
5125         memset(&nodes[shared_level + 1], 0,
5126                sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
5127
5128         if (nodes[lowest_level] == 0) {
5129                 path->lowest_level = lowest_level;
5130                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5131                                         0, 1);
5132                 BUG_ON(ret);
5133                 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
5134                         eb = path->nodes[level];
5135                         if (!eb || eb == reloc_root->node)
5136                                 break;
5137                         nodes[level] = eb->start;
5138                         if (level == 0)
5139                                 btrfs_item_key_to_cpu(eb, &keys[level], 0);
5140                         else
5141                                 btrfs_node_key_to_cpu(eb, &keys[level], 0);
5142                 }
5143                 if (nodes[0] &&
5144                     ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5145                         eb = path->nodes[0];
5146                         ret = replace_extents_in_leaf(trans, reloc_root, eb,
5147                                                       group, reloc_inode);
5148                         BUG_ON(ret);
5149                 }
5150                 btrfs_release_path(reloc_root, path);
5151         } else {
5152                 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
5153                                        lowest_level);
5154                 BUG_ON(ret);
5155         }
5156
5157         /*
5158          * replace tree blocks in the fs tree with tree blocks in
5159          * the reloc tree.
5160          */
5161         ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
5162         BUG_ON(ret < 0);
5163
5164         if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5165                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5166                                         0, 0);
5167                 BUG_ON(ret);
5168                 extent_buffer_get(path->nodes[0]);
5169                 eb = path->nodes[0];
5170                 btrfs_release_path(reloc_root, path);
5171                 ret = invalidate_extent_cache(reloc_root, eb, group, root);
5172                 BUG_ON(ret);
5173                 free_extent_buffer(eb);
5174         }
5175
5176         mutex_unlock(&root->fs_info->tree_reloc_mutex);
5177         path->lowest_level = 0;
5178         return 0;
5179 }
5180
5181 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
5182                                         struct btrfs_root *root,
5183                                         struct btrfs_path *path,
5184                                         struct btrfs_key *first_key,
5185                                         struct btrfs_ref_path *ref_path)
5186 {
5187         int ret;
5188
5189         ret = relocate_one_path(trans, root, path, first_key,
5190                                 ref_path, NULL, NULL);
5191         BUG_ON(ret);
5192
5193         return 0;
5194 }
5195
5196 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
5197                                     struct btrfs_root *extent_root,
5198                                     struct btrfs_path *path,
5199                                     struct btrfs_key *extent_key)
5200 {
5201         int ret;
5202
5203         ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
5204         if (ret)
5205                 goto out;
5206         ret = btrfs_del_item(trans, extent_root, path);
5207 out:
5208         btrfs_release_path(extent_root, path);
5209         return ret;
5210 }
5211
5212 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
5213                                                 struct btrfs_ref_path *ref_path)
5214 {
5215         struct btrfs_key root_key;
5216
5217         root_key.objectid = ref_path->root_objectid;
5218         root_key.type = BTRFS_ROOT_ITEM_KEY;
5219         if (is_cowonly_root(ref_path->root_objectid))
5220                 root_key.offset = 0;
5221         else
5222                 root_key.offset = (u64)-1;
5223
5224         return btrfs_read_fs_root_no_name(fs_info, &root_key);
5225 }
5226
5227 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
5228                                         struct btrfs_path *path,
5229                                         struct btrfs_key *extent_key,
5230                                         struct btrfs_block_group_cache *group,
5231                                         struct inode *reloc_inode, int pass)
5232 {
5233         struct btrfs_trans_handle *trans;
5234         struct btrfs_root *found_root;
5235         struct btrfs_ref_path *ref_path = NULL;
5236         struct disk_extent *new_extents = NULL;
5237         int nr_extents = 0;
5238         int loops;
5239         int ret;
5240         int level;
5241         struct btrfs_key first_key;
5242         u64 prev_block = 0;
5243
5244
5245         trans = btrfs_start_transaction(extent_root, 1);
5246         BUG_ON(!trans);
5247
5248         if (extent_key->objectid == 0) {
5249                 ret = del_extent_zero(trans, extent_root, path, extent_key);
5250                 goto out;
5251         }
5252
5253         ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
5254         if (!ref_path) {
5255                 ret = -ENOMEM;
5256                 goto out;
5257         }
5258
5259         for (loops = 0; ; loops++) {
5260                 if (loops == 0) {
5261                         ret = btrfs_first_ref_path(trans, extent_root, ref_path,
5262                                                    extent_key->objectid);
5263                 } else {
5264                         ret = btrfs_next_ref_path(trans, extent_root, ref_path);
5265                 }
5266                 if (ret < 0)
5267                         goto out;
5268                 if (ret > 0)
5269                         break;
5270
5271                 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5272                     ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
5273                         continue;
5274
5275                 found_root = read_ref_root(extent_root->fs_info, ref_path);
5276                 BUG_ON(!found_root);
5277                 /*
5278                  * for reference counted tree, only process reference paths
5279                  * rooted at the latest committed root.
5280                  */
5281                 if (found_root->ref_cows &&
5282                     ref_path->root_generation != found_root->root_key.offset)
5283                         continue;
5284
5285                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5286                         if (pass == 0) {
5287                                 /*
5288                                  * copy data extents to new locations
5289                                  */
5290                                 u64 group_start = group->key.objectid;
5291                                 ret = relocate_data_extent(reloc_inode,
5292                                                            extent_key,
5293                                                            group_start);
5294                                 if (ret < 0)
5295                                         goto out;
5296                                 break;
5297                         }
5298                         level = 0;
5299                 } else {
5300                         level = ref_path->owner_objectid;
5301                 }
5302
5303                 if (prev_block != ref_path->nodes[level]) {
5304                         struct extent_buffer *eb;
5305                         u64 block_start = ref_path->nodes[level];
5306                         u64 block_size = btrfs_level_size(found_root, level);
5307
5308                         eb = read_tree_block(found_root, block_start,
5309                                              block_size, 0);
5310                         btrfs_tree_lock(eb);
5311                         BUG_ON(level != btrfs_header_level(eb));
5312
5313                         if (level == 0)
5314                                 btrfs_item_key_to_cpu(eb, &first_key, 0);
5315                         else
5316                                 btrfs_node_key_to_cpu(eb, &first_key, 0);
5317
5318                         btrfs_tree_unlock(eb);
5319                         free_extent_buffer(eb);
5320                         prev_block = block_start;
5321                 }
5322
5323                 mutex_lock(&extent_root->fs_info->trans_mutex);
5324                 btrfs_record_root_in_trans(found_root);
5325                 mutex_unlock(&extent_root->fs_info->trans_mutex);
5326                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5327                         /*
5328                          * try to update data extent references while
5329                          * keeping metadata shared between snapshots.
5330                          */
5331                         if (pass == 1) {
5332                                 ret = relocate_one_path(trans, found_root,
5333                                                 path, &first_key, ref_path,
5334                                                 group, reloc_inode);
5335                                 if (ret < 0)
5336                                         goto out;
5337                                 continue;
5338                         }
5339                         /*
5340                          * use fallback method to process the remaining
5341                          * references.
5342                          */
5343                         if (!new_extents) {
5344                                 u64 group_start = group->key.objectid;
5345                                 new_extents = kmalloc(sizeof(*new_extents),
5346                                                       GFP_NOFS);
5347                                 nr_extents = 1;
5348                                 ret = get_new_locations(reloc_inode,
5349                                                         extent_key,
5350                                                         group_start, 1,
5351                                                         &new_extents,
5352                                                         &nr_extents);
5353                                 if (ret)
5354                                         goto out;
5355                         }
5356                         ret = replace_one_extent(trans, found_root,
5357                                                 path, extent_key,
5358                                                 &first_key, ref_path,
5359                                                 new_extents, nr_extents);
5360                 } else {
5361                         ret = relocate_tree_block(trans, found_root, path,
5362                                                   &first_key, ref_path);
5363                 }
5364                 if (ret < 0)
5365                         goto out;
5366         }
5367         ret = 0;
5368 out:
5369         btrfs_end_transaction(trans, extent_root);
5370         kfree(new_extents);
5371         kfree(ref_path);
5372         return ret;
5373 }
5374
5375 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
5376 {
5377         u64 num_devices;
5378         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
5379                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
5380
5381         num_devices = root->fs_info->fs_devices->rw_devices;
5382         if (num_devices == 1) {
5383                 stripped |= BTRFS_BLOCK_GROUP_DUP;
5384                 stripped = flags & ~stripped;
5385
5386                 /* turn raid0 into single device chunks */
5387                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
5388                         return stripped;
5389
5390                 /* turn mirroring into duplication */
5391                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
5392                              BTRFS_BLOCK_GROUP_RAID10))
5393                         return stripped | BTRFS_BLOCK_GROUP_DUP;
5394                 return flags;
5395         } else {
5396                 /* they already had raid on here, just return */
5397                 if (flags & stripped)
5398                         return flags;
5399
5400                 stripped |= BTRFS_BLOCK_GROUP_DUP;
5401                 stripped = flags & ~stripped;
5402
5403                 /* switch duplicated blocks with raid1 */
5404                 if (flags & BTRFS_BLOCK_GROUP_DUP)
5405                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
5406
5407                 /* turn single device chunks into raid0 */
5408                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
5409         }
5410         return flags;
5411 }
5412
5413 static int __alloc_chunk_for_shrink(struct btrfs_root *root,
5414                      struct btrfs_block_group_cache *shrink_block_group,
5415                      int force)
5416 {
5417         struct btrfs_trans_handle *trans;
5418         u64 new_alloc_flags;
5419         u64 calc;
5420
5421         spin_lock(&shrink_block_group->lock);
5422         if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
5423                 spin_unlock(&shrink_block_group->lock);
5424
5425                 trans = btrfs_start_transaction(root, 1);
5426                 spin_lock(&shrink_block_group->lock);
5427
5428                 new_alloc_flags = update_block_group_flags(root,
5429                                                    shrink_block_group->flags);
5430                 if (new_alloc_flags != shrink_block_group->flags) {
5431                         calc =
5432                              btrfs_block_group_used(&shrink_block_group->item);
5433                 } else {
5434                         calc = shrink_block_group->key.offset;
5435                 }
5436                 spin_unlock(&shrink_block_group->lock);
5437
5438                 do_chunk_alloc(trans, root->fs_info->extent_root,
5439                                calc + 2 * 1024 * 1024, new_alloc_flags, force);
5440
5441                 btrfs_end_transaction(trans, root);
5442         } else
5443                 spin_unlock(&shrink_block_group->lock);
5444         return 0;
5445 }
5446
5447 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
5448                                  struct btrfs_root *root,
5449                                  u64 objectid, u64 size)
5450 {
5451         struct btrfs_path *path;
5452         struct btrfs_inode_item *item;
5453         struct extent_buffer *leaf;
5454         int ret;
5455
5456         path = btrfs_alloc_path();
5457         if (!path)
5458                 return -ENOMEM;
5459
5460         path->leave_spinning = 1;
5461         ret = btrfs_insert_empty_inode(trans, root, path, objectid);
5462         if (ret)
5463                 goto out;
5464
5465         leaf = path->nodes[0];
5466         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
5467         memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
5468         btrfs_set_inode_generation(leaf, item, 1);
5469         btrfs_set_inode_size(leaf, item, size);
5470         btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
5471         btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
5472         btrfs_mark_buffer_dirty(leaf);
5473         btrfs_release_path(root, path);
5474 out:
5475         btrfs_free_path(path);
5476         return ret;
5477 }
5478
5479 static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
5480                                         struct btrfs_block_group_cache *group)
5481 {
5482         struct inode *inode = NULL;
5483         struct btrfs_trans_handle *trans;
5484         struct btrfs_root *root;
5485         struct btrfs_key root_key;
5486         u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
5487         int err = 0;
5488
5489         root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5490         root_key.type = BTRFS_ROOT_ITEM_KEY;
5491         root_key.offset = (u64)-1;
5492         root = btrfs_read_fs_root_no_name(fs_info, &root_key);
5493         if (IS_ERR(root))
5494                 return ERR_CAST(root);
5495
5496         trans = btrfs_start_transaction(root, 1);
5497         BUG_ON(!trans);
5498
5499         err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
5500         if (err)
5501                 goto out;
5502
5503         err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
5504         BUG_ON(err);
5505
5506         err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
5507                                        group->key.offset, 0, group->key.offset,
5508                                        0, 0, 0);
5509         BUG_ON(err);
5510
5511         inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
5512         if (inode->i_state & I_NEW) {
5513                 BTRFS_I(inode)->root = root;
5514                 BTRFS_I(inode)->location.objectid = objectid;
5515                 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5516                 BTRFS_I(inode)->location.offset = 0;
5517                 btrfs_read_locked_inode(inode);
5518                 unlock_new_inode(inode);
5519                 BUG_ON(is_bad_inode(inode));
5520         } else {
5521                 BUG_ON(1);
5522         }
5523         BTRFS_I(inode)->index_cnt = group->key.objectid;
5524
5525         err = btrfs_orphan_add(trans, inode);
5526 out:
5527         btrfs_end_transaction(trans, root);
5528         if (err) {
5529                 if (inode)
5530                         iput(inode);
5531                 inode = ERR_PTR(err);
5532         }
5533         return inode;
5534 }
5535
5536 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
5537 {
5538
5539         struct btrfs_ordered_sum *sums;
5540         struct btrfs_sector_sum *sector_sum;
5541         struct btrfs_ordered_extent *ordered;
5542         struct btrfs_root *root = BTRFS_I(inode)->root;
5543         struct list_head list;
5544         size_t offset;
5545         int ret;
5546         u64 disk_bytenr;
5547
5548         INIT_LIST_HEAD(&list);
5549
5550         ordered = btrfs_lookup_ordered_extent(inode, file_pos);
5551         BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
5552
5553         disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
5554         ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
5555                                        disk_bytenr + len - 1, &list);
5556
5557         while (!list_empty(&list)) {
5558                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
5559                 list_del_init(&sums->list);
5560
5561                 sector_sum = sums->sums;
5562                 sums->bytenr = ordered->start;
5563
5564                 offset = 0;
5565                 while (offset < sums->len) {
5566                         sector_sum->bytenr += ordered->start - disk_bytenr;
5567                         sector_sum++;
5568                         offset += root->sectorsize;
5569                 }
5570
5571                 btrfs_add_ordered_sum(inode, ordered, sums);
5572         }
5573         btrfs_put_ordered_extent(ordered);
5574         return 0;
5575 }
5576
5577 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
5578 {
5579         struct btrfs_trans_handle *trans;
5580         struct btrfs_path *path;
5581         struct btrfs_fs_info *info = root->fs_info;
5582         struct extent_buffer *leaf;
5583         struct inode *reloc_inode;
5584         struct btrfs_block_group_cache *block_group;
5585         struct btrfs_key key;
5586         u64 skipped;
5587         u64 cur_byte;
5588         u64 total_found;
5589         u32 nritems;
5590         int ret;
5591         int progress;
5592         int pass = 0;
5593
5594         root = root->fs_info->extent_root;
5595
5596         block_group = btrfs_lookup_block_group(info, group_start);
5597         BUG_ON(!block_group);
5598
5599         printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
5600                (unsigned long long)block_group->key.objectid,
5601                (unsigned long long)block_group->flags);
5602
5603         path = btrfs_alloc_path();
5604         BUG_ON(!path);
5605
5606         reloc_inode = create_reloc_inode(info, block_group);
5607         BUG_ON(IS_ERR(reloc_inode));
5608
5609         __alloc_chunk_for_shrink(root, block_group, 1);
5610         set_block_group_readonly(block_group);
5611
5612         btrfs_start_delalloc_inodes(info->tree_root);
5613         btrfs_wait_ordered_extents(info->tree_root, 0);
5614 again:
5615         skipped = 0;
5616         total_found = 0;
5617         progress = 0;
5618         key.objectid = block_group->key.objectid;
5619         key.offset = 0;
5620         key.type = 0;
5621         cur_byte = key.objectid;
5622
5623         trans = btrfs_start_transaction(info->tree_root, 1);
5624         btrfs_commit_transaction(trans, info->tree_root);
5625
5626         mutex_lock(&root->fs_info->cleaner_mutex);
5627         btrfs_clean_old_snapshots(info->tree_root);
5628         btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
5629         mutex_unlock(&root->fs_info->cleaner_mutex);
5630
5631         trans = btrfs_start_transaction(info->tree_root, 1);
5632         btrfs_commit_transaction(trans, info->tree_root);
5633
5634         while (1) {
5635                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5636                 if (ret < 0)
5637                         goto out;
5638 next:
5639                 leaf = path->nodes[0];
5640                 nritems = btrfs_header_nritems(leaf);
5641                 if (path->slots[0] >= nritems) {
5642                         ret = btrfs_next_leaf(root, path);
5643                         if (ret < 0)
5644                                 goto out;
5645                         if (ret == 1) {
5646                                 ret = 0;
5647                                 break;
5648                         }
5649                         leaf = path->nodes[0];
5650                         nritems = btrfs_header_nritems(leaf);
5651                 }
5652
5653                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5654
5655                 if (key.objectid >= block_group->key.objectid +
5656                     block_group->key.offset)
5657                         break;
5658
5659                 if (progress && need_resched()) {
5660                         btrfs_release_path(root, path);
5661                         cond_resched();
5662                         progress = 0;
5663                         continue;
5664                 }
5665                 progress = 1;
5666
5667                 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
5668                     key.objectid + key.offset <= cur_byte) {
5669                         path->slots[0]++;
5670                         goto next;
5671                 }
5672
5673                 total_found++;
5674                 cur_byte = key.objectid + key.offset;
5675                 btrfs_release_path(root, path);
5676
5677                 __alloc_chunk_for_shrink(root, block_group, 0);
5678                 ret = relocate_one_extent(root, path, &key, block_group,
5679                                           reloc_inode, pass);
5680                 BUG_ON(ret < 0);
5681                 if (ret > 0)
5682                         skipped++;
5683
5684                 key.objectid = cur_byte;
5685                 key.type = 0;
5686                 key.offset = 0;
5687         }
5688
5689         btrfs_release_path(root, path);
5690
5691         if (pass == 0) {
5692                 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
5693                 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
5694         }
5695
5696         if (total_found > 0) {
5697                 printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
5698                        (unsigned long long)total_found, pass);
5699                 pass++;
5700                 if (total_found == skipped && pass > 2) {
5701                         iput(reloc_inode);
5702                         reloc_inode = create_reloc_inode(info, block_group);
5703                         pass = 0;
5704                 }
5705                 goto again;
5706         }
5707
5708         /* delete reloc_inode */
5709         iput(reloc_inode);
5710
5711         /* unpin extents in this range */
5712         trans = btrfs_start_transaction(info->tree_root, 1);
5713         btrfs_commit_transaction(trans, info->tree_root);
5714
5715         spin_lock(&block_group->lock);
5716         WARN_ON(block_group->pinned > 0);
5717         WARN_ON(block_group->reserved > 0);
5718         WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
5719         spin_unlock(&block_group->lock);
5720         btrfs_put_block_group(block_group);
5721         ret = 0;
5722 out:
5723         btrfs_free_path(path);
5724         return ret;
5725 }
5726
5727 static int find_first_block_group(struct btrfs_root *root,
5728                 struct btrfs_path *path, struct btrfs_key *key)
5729 {
5730         int ret = 0;
5731         struct btrfs_key found_key;
5732         struct extent_buffer *leaf;
5733         int slot;
5734
5735         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
5736         if (ret < 0)
5737                 goto out;
5738
5739         while (1) {
5740                 slot = path->slots[0];
5741                 leaf = path->nodes[0];
5742                 if (slot >= btrfs_header_nritems(leaf)) {
5743                         ret = btrfs_next_leaf(root, path);
5744                         if (ret == 0)
5745                                 continue;
5746                         if (ret < 0)
5747                                 goto out;
5748                         break;
5749                 }
5750                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5751
5752                 if (found_key.objectid >= key->objectid &&
5753                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
5754                         ret = 0;
5755                         goto out;
5756                 }
5757                 path->slots[0]++;
5758         }
5759         ret = -ENOENT;
5760 out:
5761         return ret;
5762 }
5763
5764 int btrfs_free_block_groups(struct btrfs_fs_info *info)
5765 {
5766         struct btrfs_block_group_cache *block_group;
5767         struct btrfs_space_info *space_info;
5768         struct rb_node *n;
5769
5770         spin_lock(&info->block_group_cache_lock);
5771         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
5772                 block_group = rb_entry(n, struct btrfs_block_group_cache,
5773                                        cache_node);
5774                 rb_erase(&block_group->cache_node,
5775                          &info->block_group_cache_tree);
5776                 spin_unlock(&info->block_group_cache_lock);
5777
5778                 btrfs_remove_free_space_cache(block_group);
5779                 down_write(&block_group->space_info->groups_sem);
5780                 list_del(&block_group->list);
5781                 up_write(&block_group->space_info->groups_sem);
5782
5783                 WARN_ON(atomic_read(&block_group->count) != 1);
5784                 kfree(block_group);
5785
5786                 spin_lock(&info->block_group_cache_lock);
5787         }
5788         spin_unlock(&info->block_group_cache_lock);
5789
5790         /* now that all the block groups are freed, go through and
5791          * free all the space_info structs.  This is only called during
5792          * the final stages of unmount, and so we know nobody is
5793          * using them.  We call synchronize_rcu() once before we start,
5794          * just to be on the safe side.
5795          */
5796         synchronize_rcu();
5797
5798         while(!list_empty(&info->space_info)) {
5799                 space_info = list_entry(info->space_info.next,
5800                                         struct btrfs_space_info,
5801                                         list);
5802
5803                 list_del(&space_info->list);
5804                 kfree(space_info);
5805         }
5806         return 0;
5807 }
5808
5809 int btrfs_read_block_groups(struct btrfs_root *root)
5810 {
5811         struct btrfs_path *path;
5812         int ret;
5813         struct btrfs_block_group_cache *cache;
5814         struct btrfs_fs_info *info = root->fs_info;
5815         struct btrfs_space_info *space_info;
5816         struct btrfs_key key;
5817         struct btrfs_key found_key;
5818         struct extent_buffer *leaf;
5819
5820         root = info->extent_root;
5821         key.objectid = 0;
5822         key.offset = 0;
5823         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5824         path = btrfs_alloc_path();
5825         if (!path)
5826                 return -ENOMEM;
5827
5828         while (1) {
5829                 ret = find_first_block_group(root, path, &key);
5830                 if (ret > 0) {
5831                         ret = 0;
5832                         goto error;
5833                 }
5834                 if (ret != 0)
5835                         goto error;
5836
5837                 leaf = path->nodes[0];
5838                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5839                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5840                 if (!cache) {
5841                         ret = -ENOMEM;
5842                         break;
5843                 }
5844
5845                 atomic_set(&cache->count, 1);
5846                 spin_lock_init(&cache->lock);
5847                 spin_lock_init(&cache->tree_lock);
5848                 mutex_init(&cache->cache_mutex);
5849                 INIT_LIST_HEAD(&cache->list);
5850                 INIT_LIST_HEAD(&cache->cluster_list);
5851                 read_extent_buffer(leaf, &cache->item,
5852                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
5853                                    sizeof(cache->item));
5854                 memcpy(&cache->key, &found_key, sizeof(found_key));
5855
5856                 key.objectid = found_key.objectid + found_key.offset;
5857                 btrfs_release_path(root, path);
5858                 cache->flags = btrfs_block_group_flags(&cache->item);
5859
5860                 ret = update_space_info(info, cache->flags, found_key.offset,
5861                                         btrfs_block_group_used(&cache->item),
5862                                         &space_info);
5863                 BUG_ON(ret);
5864                 cache->space_info = space_info;
5865                 down_write(&space_info->groups_sem);
5866                 list_add_tail(&cache->list, &space_info->block_groups);
5867                 up_write(&space_info->groups_sem);
5868
5869                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5870                 BUG_ON(ret);
5871
5872                 set_avail_alloc_bits(root->fs_info, cache->flags);
5873                 if (btrfs_chunk_readonly(root, cache->key.objectid))
5874                         set_block_group_readonly(cache);
5875         }
5876         ret = 0;
5877 error:
5878         btrfs_free_path(path);
5879         return ret;
5880 }
5881
5882 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
5883                            struct btrfs_root *root, u64 bytes_used,
5884                            u64 type, u64 chunk_objectid, u64 chunk_offset,
5885                            u64 size)
5886 {
5887         int ret;
5888         struct btrfs_root *extent_root;
5889         struct btrfs_block_group_cache *cache;
5890
5891         extent_root = root->fs_info->extent_root;
5892
5893         root->fs_info->last_trans_log_full_commit = trans->transid;
5894
5895         cache = kzalloc(sizeof(*cache), GFP_NOFS);
5896         if (!cache)
5897                 return -ENOMEM;
5898
5899         cache->key.objectid = chunk_offset;
5900         cache->key.offset = size;
5901         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
5902         atomic_set(&cache->count, 1);
5903         spin_lock_init(&cache->lock);
5904         spin_lock_init(&cache->tree_lock);
5905         mutex_init(&cache->cache_mutex);
5906         INIT_LIST_HEAD(&cache->list);
5907         INIT_LIST_HEAD(&cache->cluster_list);
5908
5909         btrfs_set_block_group_used(&cache->item, bytes_used);
5910         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
5911         cache->flags = type;
5912         btrfs_set_block_group_flags(&cache->item, type);
5913
5914         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
5915                                 &cache->space_info);
5916         BUG_ON(ret);
5917         down_write(&cache->space_info->groups_sem);
5918         list_add_tail(&cache->list, &cache->space_info->block_groups);
5919         up_write(&cache->space_info->groups_sem);
5920
5921         ret = btrfs_add_block_group_cache(root->fs_info, cache);
5922         BUG_ON(ret);
5923
5924         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
5925                                 sizeof(cache->item));
5926         BUG_ON(ret);
5927
5928         set_avail_alloc_bits(extent_root->fs_info, type);
5929
5930         return 0;
5931 }
5932
5933 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5934                              struct btrfs_root *root, u64 group_start)
5935 {
5936         struct btrfs_path *path;
5937         struct btrfs_block_group_cache *block_group;
5938         struct btrfs_key key;
5939         int ret;
5940
5941         root = root->fs_info->extent_root;
5942
5943         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
5944         BUG_ON(!block_group);
5945         BUG_ON(!block_group->ro);
5946
5947         memcpy(&key, &block_group->key, sizeof(key));
5948
5949         path = btrfs_alloc_path();
5950         BUG_ON(!path);
5951
5952         spin_lock(&root->fs_info->block_group_cache_lock);
5953         rb_erase(&block_group->cache_node,
5954                  &root->fs_info->block_group_cache_tree);
5955         spin_unlock(&root->fs_info->block_group_cache_lock);
5956         btrfs_remove_free_space_cache(block_group);
5957         down_write(&block_group->space_info->groups_sem);
5958         list_del(&block_group->list);
5959         up_write(&block_group->space_info->groups_sem);
5960
5961         spin_lock(&block_group->space_info->lock);
5962         block_group->space_info->total_bytes -= block_group->key.offset;
5963         block_group->space_info->bytes_readonly -= block_group->key.offset;
5964         spin_unlock(&block_group->space_info->lock);
5965         block_group->space_info->full = 0;
5966
5967         btrfs_put_block_group(block_group);
5968         btrfs_put_block_group(block_group);
5969
5970         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
5971         if (ret > 0)
5972                 ret = -EIO;
5973         if (ret < 0)
5974                 goto out;
5975
5976         ret = btrfs_del_item(trans, root, path);
5977 out:
5978         btrfs_free_path(path);
5979         return ret;
5980 }