/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};
static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))

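/*
 * uuid_mutex protects the global list of filesystems (fs_uuids) below;
 * each btrfs_fs_devices entry on that list tracks the devices that
 * belong to one on-disk filesystem uuid.
 */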
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
	mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
	mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
	return 0;
}

static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run = 0;
	unsigned long limit;

	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	pending = device->pending_bios;
	tail = device->pending_bio_tail;
	WARN_ON(pending && !tail);
	device->pending_bios = NULL;
	device->pending_bio_tail = NULL;

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (pending) {
		again = 1;
		device->running_pending = 1;
	} else {
		again = 0;
		device->running_pending = 0;
	}
	spin_unlock(&device->io_lock);

	while (pending) {
		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		submit_bio(cur->bi_rw, cur);
		num_run++;

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && num_run > 16 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct bio *old_head;

			spin_lock(&device->io_lock);

			old_head = device->pending_bios;
			device->pending_bios = pending;
			if (device->pending_bio_tail)
				tail->bi_next = old_head;
			else
				device->pending_bio_tail = tail;

			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
	}
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios)
		goto loop_lock;
	spin_unlock(&device->io_lock);
done:
	return 0;
}

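/* worker thread callback: drain this device's list of pending bios */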
static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

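/*
 * record a scanned device in the in-memory list for its fsid,
 * allocating a new btrfs_fs_devices entry first if this is the first
 * device seen for that filesystem.
 */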
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);
		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

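/* duplicate an fs_devices list and all of its device entries */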
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name)
			goto error;

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

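/*
 * drop any devices that were scanned but are not referenced by the
 * filesystem metadata, closing their block devices.
 */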
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	mutex_lock(&uuid_mutex);
again:
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata)
			continue;

		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	mutex_unlock(&uuid_mutex);
	return 0;
}

static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev) {
			close_bdev_exclusive(device->bdev, device->mode);
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		device->bdev = NULL;
		device->writeable = 0;
		device->in_fs_metadata = 0;
	}
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}

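/*
 * open every device in the list, read its super block and remember
 * the device holding the most recent transaction as latest_bdev.
 */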
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = open_bdev_exclusive(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = le64_to_cpu(disk_super->dev_item.devid);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		continue;

error_brelse:
		brelse(bh);
error_close:
		close_bdev_exclusive(bdev, FMODE_READ);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EIO;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_exclusive(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk(KERN_INFO "device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	close_bdev_exclusive(bdev, flags);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_device *device,
					 u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;
	start_found = 0;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	ret = 0;

error:
	btrfs_free_path(path);
	return ret;
}

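/* remove the dev extent item covering @start from the device tree */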
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		BUG_ON(ret);
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		ret = 0;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	}
	BUG_ON(ret);

	if (device->bytes_used > 0)
		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}

int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);
	return ret;
}

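/* find the offset just past the last chunk item for @objectid */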
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

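/* find the next available devid (the highest existing devid plus one) */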
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

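/* delete the device item for @device from the chunk tree */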
static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

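/*
 * remove a device from the filesystem: shrink it to zero (relocating
 * its chunks elsewhere), delete its items and wipe its super block.
 */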
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;

	mutex_lock(&uuid_mutex);
	mutex_lock(&root->fs_info->volume_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		root->fs_info->avail_system_alloc_bits |
		root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->rw_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->rw_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = open_bdev_exclusive(device_path, FMODE_READ,
					   root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EIO;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = le64_to_cpu(disk_super->dev_item.devid);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		list_del_init(&device->dev_alloc_list);
		root->fs_info->fs_devices->rw_devices--;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_brelse;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_brelse;

	device->in_fs_metadata = 0;
	list_del_init(&device->dev_list);
	device->fs_devices->num_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev) {
		close_bdev_exclusive(device->bdev, device->mode);
		device->bdev = NULL;
		device->fs_devices->open_devices--;
	}

	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);

	if (device->fs_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == device->fs_devices)
				break;
			fs_devices = fs_devices->seed;
		}
		fs_devices->seed = device->fs_devices->seed;
		device->fs_devices->seed = NULL;
		__btrfs_close_devices(device->fs_devices);
		free_fs_devices(device->fs_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (device->writeable) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	kfree(device->name);
	kfree(device);
	ret = 0;
	goto out;

error_brelse:
	brelse(bh);
error_close:
	close_bdev_exclusive(bdev, FMODE_READ);
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	mutex_unlock(&uuid_mutex);
	return ret;
}

/*
 * does all the dirty work required for changing a file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	list_splice_init(&fs_devices->devices, &seed_devices->devices);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(root, path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device);

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *devices;
	struct super_block *sb = root->fs_info->sb;
	u64 total_bytes;
	int seeding_dev = 0;
	int ret = 0;

	if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
		return -EINVAL;

	bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
	if (!bdev)
		return -EIO;

	if (root->fs_info->fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	mutex_lock(&root->fs_info->volume_mutex);

	devices = &root->fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto error;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto error;
	}

	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		ret = -ENOMEM;
		goto error;
	}

	ret = find_next_devid(root, &device->devid);
	if (ret) {
		kfree(device->name);
		kfree(device);
		goto error;
	}

	trans = btrfs_start_transaction(root, 1);
	lock_chunks(root);

	device->barriers = 1;
	device->writeable = 1;
	device->work.func = pending_bios_fn;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->generation = trans->transid;
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;
	device->in_fs_metadata = 1;

	set_blocksize(device->bdev, 4096);

	if (seeding_dev) {
		sb->s_flags &= ~MS_RDONLY;
		ret = btrfs_prepare_sprout(trans, root);
		BUG_ON(ret);
	}

	device->fs_devices = root->fs_info->fs_devices;
	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
	root->fs_info->fs_devices->open_devices++;
	root->fs_info->fs_devices->rw_devices++;
	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	if (seeding_dev) {
		ret = init_first_rw_device(trans, root, device);
		BUG_ON(ret);
		ret = btrfs_finish_sprout(trans, root);
		BUG_ON(ret);
	} else {
		ret = btrfs_add_device(trans, root, device);
	}

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(root->fs_info);

	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);

		ret = btrfs_relocate_sys_chunks(root);
		BUG_ON(ret);
	}
out:
	mutex_unlock(&root->fs_info->volume_mutex);
	return ret;
error:
	close_bdev_exclusive(bdev, 0);
	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	goto out;
}

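/* write the in-memory device fields back to its device item */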
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	if (!device->writeable)
		return -EACCES;
	if (new_size <= device->total_bytes)
		return -EINVAL;

	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	device->fs_devices->total_rw_bytes += diff;

	device->total_bytes = new_size;
	btrfs_clear_space_info_full(device->dev_root->fs_info);

	return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	int ret;
	lock_chunks(device->dev_root);
	ret = __btrfs_grow_device(trans, device, new_size);
	unlock_chunks(device->dev_root);
	return ret;
}

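/* delete the chunk item at @chunk_offset from the chunk tree */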
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

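/* remove a chunk from the sys_chunk_array in the super block copy */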
static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}

static int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	printk(KERN_INFO "btrfs relocating chunk %llu\n",
	       (unsigned long long)chunk_offset);
	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	lock_chunks(root);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset ||
	       em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);

		if (map->stripes[i].dev) {
			ret = btrfs_update_device(trans, map->stripes[i].dev);
			BUG_ON(ret);
		}
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
	BUG_ON(ret);

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);

	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	/* once for us */
	free_extent_map(em);

	unlock_chunks(root);
	btrfs_end_transaction(trans, root);
	return 0;
}

static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
{
	struct btrfs_root *chunk_root = root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_tree = chunk_root->root_key.objectid;
	u64 chunk_type;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;
		BUG_ON(ret == 0);

		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		btrfs_release_path(chunk_root, path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
						   found_key.objectid,
						   found_key.offset);
			BUG_ON(ret);
		}

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

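/* return num scaled by factor/10; a factor of 10 means unchanged */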
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

int btrfs_balance(struct btrfs_root *dev_root)
{
	int ret;
	struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
	struct btrfs_device *device;
	u64 old_size;
	u64 size_to_free;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key found_key;

	if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	mutex_lock(&dev_root->fs_info->volume_mutex);
	dev_root = dev_root->fs_info->dev_root;

	/* step one make some room on all the devices */
	list_for_each_entry(device, devices, dev_list) {
		old_size = device->total_bytes;
		size_to_free = div_factor(old_size, 1);
		size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
		if (!device->writeable ||
		    device->total_bytes - device->bytes_used > size_to_free)
			continue;

		ret = btrfs_shrink_device(device, old_size - size_to_free);
		BUG_ON(ret);

		trans = btrfs_start_transaction(dev_root, 1);
		BUG_ON(!trans);

		ret = btrfs_grow_device(trans, device, old_size);
		BUG_ON(ret);

		btrfs_end_transaction(trans, dev_root);
	}

	/* step two, relocate all the chunks */
	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0)
			goto error;

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			break;

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret)
			break;

		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid)
			break;

		chunk = btrfs_item_ptr(path->nodes[0],
				       path->slots[0],
				       struct btrfs_chunk);
		key.offset = found_key.offset;
		/* chunk zero is special */
		if (key.offset == 0)
			break;

		btrfs_release_path(chunk_root, path);
		ret = btrfs_relocate_chunk(chunk_root,
					   chunk_root->root_key.objectid,
					   found_key.objectid,
					   found_key.offset);
		BUG_ON(ret);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	mutex_unlock(&dev_root->fs_info->volume_mutex);
	return ret;
}

/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	if (new_size >= device->total_bytes)
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	path->reada = 2;

	lock_chunks(root);

	device->total_bytes = new_size;
	if (device->writeable)
		device->fs_devices->total_rw_bytes -= diff;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		unlock_chunks(root);
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	unlock_chunks(root);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			goto done;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}

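/* append a chunk item to the sys_chunk_array in the super block copy */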
static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}

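/* how many usable bytes a chunk of the given raid type provides */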
static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
					int num_stripes, int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct map_lookup **map_ret,
			       u64 *num_bytes, u64 *stripe_size,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_device *device = NULL;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct list_head *cur;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct list_head private_devs;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 dev_offset;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;

	if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
	    (type & BTRFS_BLOCK_GROUP_DUP)) {
		WARN_ON(1);
		type &= ~BTRFS_BLOCK_GROUP_DUP;
	}
	if (list_empty(&fs_devices->alloc_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = fs_devices->rw_devices;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2, fs_devices->rw_devices);
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = fs_devices->rw_devices;
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	/* we don't want a chunk larger than 10% of writeable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

again:
	if (!map || map->num_stripes != num_stripes) {
		kfree(map);
		map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
		if (!map)
			return -ENOMEM;
		map->num_stripes = num_stripes;
	}

	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	cur = fs_devices->alloc_list.next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/*
	 * we add 1MB because we never use the first 1MB of the device, unless
	 * we've looped, then we are likely allocating the maximum amount of
	 * space left already
	 */
	if (!looped)
		min_free += 1024 * 1024;

	INIT_LIST_HEAD(&private_devs);
	while (index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);
		BUG_ON(!device->writeable);
		if (device->total_bytes > device->bytes_used)
			avail = device->total_bytes - device->bytes_used;
		else
			avail = 0;
		cur = cur->next;

		if (device->in_fs_metadata && avail >= min_free) {
			ret = find_free_dev_extent(trans, device,
						   min_free, &dev_offset);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				map->stripes[index].dev = device;
				map->stripes[index].physical = dev_offset;
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP) {
					map->stripes[index].dev = device;
					map->stripes[index].physical =
						dev_offset + calc_size;
					index++;
				}
			}
		} else if (device->in_fs_metadata && avail > max_avail)
			max_avail = avail;
		if (cur == &fs_devices->alloc_list)
			break;
	}
	list_splice(&private_devs, &fs_devices->alloc_list);
	if (index < num_stripes) {
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		kfree(map);
		return -ENOSPC;
	}
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	*map_ret = map;
	*stripe_size = calc_size;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		kfree(map);
		return -ENOMEM;
	}
	em->bdev = (struct block_device *)map;
	em->start = start;
	em->len = *num_bytes;
	em->block_start = 0;
	em->block_len = em->len;

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);

	ret = btrfs_make_block_group(trans, extent_root, 0, type,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				     start, *num_bytes);
	BUG_ON(ret);

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		ret = btrfs_alloc_dev_extent(trans, device,
				info->chunk_root->root_key.objectid,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				start, dev_offset, calc_size);
		BUG_ON(ret);
		index++;
	}

	return 0;
}

static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct map_lookup *map, u64 chunk_offset,
				u64 chunk_size, u64 stripe_size)
{
	u64 dev_offset;
	struct btrfs_key key;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	size_t item_size = btrfs_chunk_item_size(map->num_stripes);
	int index = 0;
	int ret;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk)
		return -ENOMEM;

	index = 0;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		device->bytes_used += stripe_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);
		index++;
	}

	index = 0;
	stripe = &chunk->stripe;
	while (index < map->num_stripes) {
		device = map->stripes[index].dev;
		dev_offset = map->stripes[index].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
		index++;
	}

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
					     item_size);
		BUG_ON(ret);
	}
	kfree(chunk);
	return 0;
}

/*
 * Chunk allocation falls into two parts. The first part does the work
 * that makes the newly allocated chunk usable, but does not do any
 * operation that modifies the chunk tree. The second part does the
 * work that requires modifying the chunk tree. This division is
 * important for the bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 type)
{
	u64 chunk_offset;
	u64 chunk_size;
	u64 stripe_size;
	struct map_lookup *map;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	int ret;

	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &chunk_offset);
	if (ret)
		return ret;

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, type);
	if (ret)
		return ret;

	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);
	return 0;
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_device *device)
{
	u64 chunk_offset;
	u64 sys_chunk_offset;
	u64 chunk_size;
	u64 sys_chunk_size;
	u64 stripe_size;
	u64 sys_stripe_size;
	u64 alloc_profile;
	struct map_lookup *map;
	struct map_lookup *sys_map;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;

	ret = find_next_chunk(fs_info->chunk_root,
			      BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
	BUG_ON(ret);

	alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
			(fs_info->metadata_alloc_profile &
			 fs_info->avail_metadata_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
				  &stripe_size, chunk_offset, alloc_profile);
	BUG_ON(ret);

	sys_chunk_offset = chunk_offset + chunk_size;

	alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
			(fs_info->system_alloc_profile &
			 fs_info->avail_system_alloc_bits);
	alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);

	ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
				  &sys_chunk_size, &sys_stripe_size,
				  sys_chunk_offset, alloc_profile);
	BUG_ON(ret);

	ret = btrfs_add_device(trans, fs_info->chunk_root, device);
	BUG_ON(ret);

	/*
	 * Modifying the chunk tree needs allocating new blocks from both
	 * the system block group and the metadata block group, so we can
	 * only do operations that require modifying the chunk tree after
	 * both block groups have been created.
	 */
	ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
				   chunk_size, stripe_size);
	BUG_ON(ret);

	ret = __finish_chunk_alloc(trans, extent_root, sys_map,
				   sys_chunk_offset, sys_chunk_size,
				   sys_stripe_size);
	BUG_ON(ret);
	return 0;
}

int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	int readonly = 0;
	int i;

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
	spin_unlock(&map_tree->map_tree.lock);
	if (!em)
		return 1;

	map = (struct map_lookup *)em->bdev;
	for (i = 0; i < map->num_stripes; i++) {
		if (!map->stripes[i].dev->writeable) {
			readonly = 1;
			break;
		}
	}
	free_extent_map(em);
	return readonly;
}

void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}

void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}

int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}

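/* pick a mirror whose device is still present, preferring @optimal */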
static int find_live_mirror(struct map_lookup *map, int first, int num,
			    int optimal)
{
	int i;
	if (map->stripes[optimal].dev->bdev)
		return optimal;
	for (i = first; i < first + num; i++) {
		if (map->stripes[i].dev->bdev)
			return i;
	}
	/* we couldn't find one that doesn't fail.  Just return something
	 * and the io error handling code will clean up eventually
	 */
	return optimal;
}

2398 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2399 u64 logical, u64 *length,
2400 struct btrfs_multi_bio **multi_ret,
2401 int mirror_num, struct page *unplug_page)
2403 struct extent_map *em;
2404 struct map_lookup *map;
2405 struct extent_map_tree *em_tree = &map_tree->map_tree;
2409 int stripes_allocated = 8;
2410 int stripes_required = 1;
2415 struct btrfs_multi_bio *multi = NULL;
2417 if (multi_ret && !(rw & (1 << BIO_RW)))
2418 stripes_allocated = 1;
2421 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2426 atomic_set(&multi->error, 0);
2429 spin_lock(&em_tree->lock);
2430 em = lookup_extent_mapping(em_tree, logical, *length);
2431 spin_unlock(&em_tree->lock);
2433 if (!em && unplug_page)
2437 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2438 (unsigned long long)logical,
2439 (unsigned long long)*length);
2443 BUG_ON(em->start > logical || em->start + em->len < logical);
2444 map = (struct map_lookup *)em->bdev;
2445 offset = logical - em->start;
2447 if (mirror_num > map->num_stripes)
2450 /* if our multi bio struct is too small, back off and try again */
2451 if (rw & (1 << BIO_RW)) {
2452 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2453 BTRFS_BLOCK_GROUP_DUP)) {
2454 stripes_required = map->num_stripes;
2456 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2457 stripes_required = map->sub_stripes;
2461 if (multi_ret && rw == WRITE &&
2462 stripes_allocated < stripes_required) {
2463 stripes_allocated = map->num_stripes;
2464 free_extent_map(em);
2470 * stripe_nr counts the total number of stripes we have to stride
2471 * to get to this block
2473 do_div(stripe_nr, map->stripe_len);
2475 stripe_offset = stripe_nr * map->stripe_len;
2476 BUG_ON(offset < stripe_offset);
2478 /* stripe_offset is the offset of this block in its stripe*/
2479 stripe_offset = offset - stripe_offset;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, 0,
					    map->num_stripes,
					    current->pid % map->num_stripes);
		}

	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;

	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			stripe_index = find_live_mirror(map, stripe_index,
					      map->sub_stripes, stripe_index +
					      current->pid % map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
	}
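	/*
	 * Worked RAID10 example for the math above (illustrative numbers):
	 * with num_stripes = 4 and sub_stripes = 2, factor = 2.  For
	 * stripe_nr = 5, do_div leaves stripe_nr = 2 with remainder 1, so
	 * stripe_index = 1 * sub_stripes = 2: the data lives in the second
	 * mirror pair, on the devices in stripes 2 and 3, two stripes deep
	 * on each device.
	 */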
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			if (device->bdev) {
				bdi = blk_get_backing_dev_info(device->bdev);
				if (bdi->unplug_io_fn)
					bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
		multi->max_errors = max_errors;
	}
out:
	free_extent_map(em);
	return 0;
}
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
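/*
 * Typical use (a sketch, not a caller in this file): to find where a
 * logical byte range lives, a caller does roughly
 *
 *	u64 map_length = length;
 *	struct btrfs_multi_bio *multi = NULL;
 *
 *	ret = btrfs_map_block(map_tree, READ, logical, &map_length,
 *			      &multi, 0);
 *
 * and then uses multi->stripes[0].dev and .physical.  Reads get a single
 * live mirror back; writes get every stripe that must be updated.
 */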
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
		     u64 chunk_start, u64 physical, u64 devid,
		     u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	int i, j, nr = 0;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_start, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(!em || em->start != chunk_start);
	map = (struct map_lookup *)em->bdev;

	length = em->len;
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		do_div(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		do_div(length, map->num_stripes);

	buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
	BUG_ON(!buf);

	for (i = 0; i < map->num_stripes; i++) {
		if (devid && map->stripes[i].dev->devid != devid)
			continue;
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		do_div(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			do_div(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		bytenr = chunk_start + stripe_nr * map->stripe_len;
		WARN_ON(nr >= map->num_stripes);
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}

	/*
	 * sanity pass: map each logical address we found back through
	 * btrfs_map_block and make sure one of the stripes it returns
	 * covers the physical address we started from.  (As originally
	 * written this loop started at i > nr, never advanced the stripe
	 * pointer and compared the bounds the wrong way around, so it
	 * could never run; fixed here to actually do the check.)
	 */
	for (i = 0; i < nr; i++) {
		struct btrfs_multi_bio *multi;
		struct btrfs_bio_stripe *stripe;
		int ret;

		length = 1;
		ret = btrfs_map_block(map_tree, WRITE, buf[i],
				      &length, &multi, 0);
		BUG_ON(ret);

		stripe = multi->stripes;
		for (j = 0; j < multi->num_stripes; j++) {
			if (stripe->physical <= physical &&
			    physical < stripe->physical + length)
				break;
			stripe++;
		}
		BUG_ON(j >= multi->num_stripes);
		kfree(multi);
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = map->stripe_len;

	free_extent_map(em);
	return 0;
}
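/*
 * Example (illustrative): the exclude-super-stripes code in extent-tree.c
 * uses this to turn a super block mirror's physical location on one
 * device back into every chunk-relative logical address that maps onto
 * it, so those logical ranges can be kept out of the allocator.  The
 * buf[j] == bytenr scan above only exists to drop duplicates, e.g. when
 * a DUP chunk keeps both copies on the same device.
 */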
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}
static void end_bio_multi_stripe(struct bio *bio, int err)
{
	struct btrfs_multi_bio *multi = bio->bi_private;
	int is_orig_bio = 0;

	if (err)
		atomic_inc(&multi->error);

	if (bio == multi->orig_bio)
		is_orig_bio = 1;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		if (!is_orig_bio) {
			bio_put(bio);
			bio = multi->orig_bio;
		}
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the multi-bio
		 */
		if (atomic_read(&multi->error) > multi->max_errors) {
			err = -EIO;
		} else if (err) {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			err = 0;
		}
		kfree(multi);

		bio_endio(bio, err);
	} else if (!is_orig_bio) {
		bio_put(bio);
	}
}
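/*
 * Example of the tolerance above (illustrative): a RAID1 write fans out
 * to two stripes and __btrfs_map_block sets max_errors to 1, so a single
 * failed mirror still completes the original bio with BIO_UPTODATE set;
 * only a second failure surfaces -EIO to the higher layers.
 */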
struct async_sched {
	struct bio *bio;
	int rw;
	struct btrfs_fs_info *info;
	struct btrfs_work work;
};
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline int schedule_bio(struct btrfs_root *root,
				 struct btrfs_device *device,
				 int rw, struct bio *bio)
{
	int should_queue = 1;

	/* don't bother with additional async steps for reads, right now */
	if (!(rw & (1 << BIO_RW))) {
		bio_get(bio);
		submit_bio(rw, bio);
		bio_put(bio);
		return 0;
	}

	/*
	 * nr_async_bios allows us to reliably return congestion to the
	 * higher layers.  Otherwise, the async bio makes it appear we have
	 * made progress against dirty pages when we've really just put it
	 * on a queue for later
	 */
	atomic_inc(&root->fs_info->nr_async_bios);
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	bio->bi_rw |= rw;

	spin_lock(&device->io_lock);

	if (device->pending_bio_tail)
		device->pending_bio_tail->bi_next = bio;

	device->pending_bio_tail = bio;
	if (!device->pending_bios)
		device->pending_bios = bio;
	if (device->running_pending)
		should_queue = 0;

	spin_unlock(&device->io_lock);

	if (should_queue)
		btrfs_queue_worker(&root->fs_info->submit_workers,
				   &device->work);
	return 0;
}
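/*
 * The pending list built above is a simple FIFO chained through bi_next:
 * after queueing bios A and then B, device->pending_bios points at A,
 * A->bi_next points at B and device->pending_bio_tail points at B.
 * run_scheduled_bios later detaches the whole chain under io_lock and
 * walks it.
 */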
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num, int async_submit)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
		       "len %llu\n", (unsigned long long)logical,
		       (unsigned long long)length,
		       (unsigned long long)map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	multi->orig_bio = first_bio;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while (dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;
		BUG_ON(rw == WRITE && !dev->writeable);
		if (dev && dev->bdev) {
			bio->bi_bdev = dev->bdev;
			if (async_submit)
				schedule_bio(root, dev, rw, bio);
			else
				submit_bio(rw, bio);
		} else {
			bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
			bio->bi_sector = logical >> 9;
			bio_endio(bio, -EIO);
		}
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
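/*
 * Example (illustrative): a write into a RAID1 chunk with two mirrors
 * comes back from btrfs_map_block with num_stripes == 2, so the loop
 * above sends one clone of first_bio to the first device and first_bio
 * itself, retargeted, to the second; end_bio_multi_stripe completes the
 * original bio once both have finished.
 */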
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid, u8 *fsid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;

	cur_devices = root->fs_info->fs_devices;
	while (cur_devices) {
		if (!fsid ||
		    !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			device = __find_device(&cur_devices->devices,
					       devid, uuid);
			if (device)
				return device;
		}
		cur_devices = cur_devices->seed;
	}
	return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device)
		return NULL;
	list_add(&device->dev_list,
		 &fs_devices->devices);
	device->barriers = 1;
	device->dev_root = root->fs_info->dev_root;
	device->devid = devid;
	device->work.func = pending_bios_fn;
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;
	spin_lock_init(&device->io_lock);
	INIT_LIST_HEAD(&device->dev_alloc_list);
	memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
	return device;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);

	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;
	em->block_len = em->len;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
							NULL);
		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(root, devid, uuid);
			if (!map->stripes[i].dev) {
				kfree(map);
				free_extent_map(em);
				return -EIO;
			}
		}
		map->stripes[i].dev->in_fs_metadata = 1;
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
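/*
 * Example (illustrative): a RAID1 metadata chunk item with two stripes
 * becomes a single extent_map covering [logical, logical + length) whose
 * em->bdev points at a map_lookup with num_stripes == 2 and one
 * btrfs_bio_stripe (device + physical offset) per mirror.
 */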
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	mutex_lock(&uuid_mutex);

	fs_devices = root->fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
			ret = 0;
			goto out;
		}
		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid);
	if (!fs_devices) {
		ret = -ENOENT;
		goto out;
	}

	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices)) {
		ret = PTR_ERR(fs_devices);
		goto out;
	}

	ret = __btrfs_open_devices(fs_devices, FMODE_READ,
				   root->fs_info->bdev_holder);
	if (ret)
		goto out;

	if (!fs_devices->seeding) {
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
		ret = -EINVAL;
		goto out;
	}

	fs_devices->seed = root->fs_info->fs_devices->seed;
	root->fs_info->fs_devices->seed = fs_devices;
out:
	mutex_unlock(&uuid_mutex);
	return ret;
}
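/*
 * Seed filesystems chain through fs_devices->seed: the mounted fs comes
 * first and each read-only seed it sprouted from follows, e.g.
 * fs_info->fs_devices -> seed A -> seed B -> NULL.  The assignments
 * above splice the freshly opened seed in at the head of that chain.
 */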
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid,
			   (unsigned long)btrfs_device_fsid(dev_item),
			   BTRFS_UUID_SIZE);

	if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
		ret = open_seed_devices(root, fs_uuid);
		if (ret && !btrfs_test_opt(root, DEGRADED))
			return ret;
	}

	device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
	if (!device || !device->bdev) {
		if (!btrfs_test_opt(root, DEGRADED))
			return -EIO;

		if (!device) {
			printk(KERN_WARNING "warning devid %llu missing\n",
			       (unsigned long long)devid);
			device = add_missing_dev(root, devid, dev_uuid);
			if (!device)
				return -ENOMEM;
		}
	}

	if (device->fs_devices != root->fs_info->fs_devices) {
		BUG_ON(device->writeable);
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	device->in_fs_metadata = 1;
	if (device->writeable)
		device->fs_devices->total_rw_bytes += device->total_bytes;
	return 0;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
					  BTRFS_SUPER_INFO_SIZE);
	if (!sb)
		return -ENOMEM;
	btrfs_set_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(sb, 0);

	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key); ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			if (ret)
				break;
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	free_extent_buffer(sb);
	return ret;
}
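/*
 * Layout of sys_chunk_array (sketch): a packed run of
 * (struct btrfs_disk_key, struct btrfs_chunk + inline stripes) pairs,
 *
 *	key(FIRST_CHUNK_TREE, CHUNK_ITEM, logical) | chunk | stripes...
 *
 * which is why the loop above advances by sizeof(*disk_key) and then by
 * btrfs_chunk_item_size(num_stripes).
 */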
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				if (ret)
					goto error;
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}