fs/btrfs/volumes.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/buffer_head.h>
21 #include <linux/blkdev.h>
22 #include <linux/random.h>
23 #include <linux/version.h>
24 #include <asm/div64.h>
25 #include "compat.h"
26 #include "ctree.h"
27 #include "extent_map.h"
28 #include "disk-io.h"
29 #include "transaction.h"
30 #include "print-tree.h"
31 #include "volumes.h"
32 #include "async-thread.h"
33
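/*
 * in-memory description of how a logical chunk is laid out across the
 * physical devices.  One btrfs_bio_stripe per stripe/copy lives in the
 * flexible stripes[] array, so an instance is allocated with
 * map_lookup_size(num_stripes) bytes (see the macro below).
 */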
34 struct map_lookup {
35         u64 type;
36         int io_align;
37         int io_width;
38         int stripe_len;
39         int sector_size;
40         int num_stripes;
41         int sub_stripes;
42         struct btrfs_bio_stripe stripes[];
43 };
44
45 static int init_first_rw_device(struct btrfs_trans_handle *trans,
46                                 struct btrfs_root *root,
47                                 struct btrfs_device *device);
48 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
49
50 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
51                             (sizeof(struct btrfs_bio_stripe) * (n)))
52
53 static DEFINE_MUTEX(uuid_mutex);
54 static LIST_HEAD(fs_uuids);
55
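/*
 * uuid_mutex protects fs_uuids, the global list of filesystems (and
 * their devices) this file knows about.  The mutex is static, so these
 * two helpers are how code outside this file takes and releases it.
 */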
56 void btrfs_lock_volumes(void)
57 {
58         mutex_lock(&uuid_mutex);
59 }
60
61 void btrfs_unlock_volumes(void)
62 {
63         mutex_unlock(&uuid_mutex);
64 }
65
66 static void lock_chunks(struct btrfs_root *root)
67 {
68         mutex_lock(&root->fs_info->chunk_mutex);
69 }
70
71 static void unlock_chunks(struct btrfs_root *root)
72 {
73         mutex_unlock(&root->fs_info->chunk_mutex);
74 }
75
76 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
77 {
78         struct btrfs_device *device;
79         WARN_ON(fs_devices->opened);
80         while (!list_empty(&fs_devices->devices)) {
81                 device = list_entry(fs_devices->devices.next,
82                                     struct btrfs_device, dev_list);
83                 list_del(&device->dev_list);
84                 kfree(device->name);
85                 kfree(device);
86         }
87         kfree(fs_devices);
88 }
89
90 int btrfs_cleanup_fs_uuids(void)
91 {
92         struct btrfs_fs_devices *fs_devices;
93
94         while (!list_empty(&fs_uuids)) {
95                 fs_devices = list_entry(fs_uuids.next,
96                                         struct btrfs_fs_devices, list);
97                 list_del(&fs_devices->list);
98                 free_fs_devices(fs_devices);
99         }
100         return 0;
101 }
102
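/*
 * find a device on @head by devid; if @uuid is non-NULL the device
 * uuid has to match as well.  Returns NULL when nothing matches.
 */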
103 static noinline struct btrfs_device *__find_device(struct list_head *head,
104                                                    u64 devid, u8 *uuid)
105 {
106         struct btrfs_device *dev;
107         struct list_head *cur;
108
109         list_for_each(cur, head) {
110                 dev = list_entry(cur, struct btrfs_device, dev_list);
111                 if (dev->devid == devid &&
112                     (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
113                         return dev;
114                 }
115         }
116         return NULL;
117 }
118
119 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
120 {
121         struct list_head *cur;
122         struct btrfs_fs_devices *fs_devices;
123
124         list_for_each(cur, &fs_uuids) {
125                 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
126                 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
127                         return fs_devices;
128         }
129         return NULL;
130 }
131
132 /*
133  * we try to collect pending bios for a device so we don't get a large
134  * number of procs sending bios down to the same device.  This greatly
135  * improves the scheduler's ability to collect and merge the bios.
136  *
137  * But, it also turns into a long list of bios to process and that is sure
138  * to eventually make the worker thread block.  The solution here is to
139  * make some progress and then put this work struct back at the end of
140  * the list if the block device is congested.  This way, multiple devices
141  * can make progress from a single worker thread.
142  */
143 static noinline int run_scheduled_bios(struct btrfs_device *device)
144 {
145         struct bio *pending;
146         struct backing_dev_info *bdi;
147         struct btrfs_fs_info *fs_info;
148         struct bio *tail;
149         struct bio *cur;
150         int again = 0;
151         unsigned long num_run = 0;
152         unsigned long limit;
153
154         bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
155         fs_info = device->dev_root->fs_info;
156         limit = btrfs_async_submit_limit(fs_info);
157         limit = limit * 2 / 3;
158
159 loop:
160         spin_lock(&device->io_lock);
161
162         /* take all the bios off the list at once and process them
163          * later on (without the lock held).  But, remember the
164          * tail and other pointers so the bios can be properly reinserted
165          * into the list if we hit congestion
166          */
167         pending = device->pending_bios;
168         tail = device->pending_bio_tail;
169         WARN_ON(pending && !tail);
170         device->pending_bios = NULL;
171         device->pending_bio_tail = NULL;
172
173         /*
174          * if pending was null this time around, no bios need processing
175          * at all and we can stop.  Otherwise it'll loop back up again
176          * and do an additional check so no bios are missed.
177          *
178          * device->running_pending is used to synchronize with the
179          * schedule_bio code.
180          */
181         if (pending) {
182                 again = 1;
183                 device->running_pending = 1;
184         } else {
185                 again = 0;
186                 device->running_pending = 0;
187         }
188         spin_unlock(&device->io_lock);
189
190         while (pending) {
191                 cur = pending;
192                 pending = pending->bi_next;
193                 cur->bi_next = NULL;
194                 atomic_dec(&fs_info->nr_async_bios);
195
196                 if (atomic_read(&fs_info->nr_async_bios) < limit &&
197                     waitqueue_active(&fs_info->async_submit_wait))
198                         wake_up(&fs_info->async_submit_wait);
199
200                 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
201                 bio_get(cur);
202                 submit_bio(cur->bi_rw, cur);
203                 bio_put(cur);
204                 num_run++;
205
206                 /*
207                  * we made progress, there is more work to do and the bdi
208                  * is now congested.  Back off and let other work structs
209                  * run instead
210                  */
211                 if (pending && bdi_write_congested(bdi) &&
212                     fs_info->fs_devices->open_devices > 1) {
213                         struct bio *old_head;
214
215                         spin_lock(&device->io_lock);
216
217                         old_head = device->pending_bios;
218                         device->pending_bios = pending;
219                         if (device->pending_bio_tail)
220                                 tail->bi_next = old_head;
221                         else
222                                 device->pending_bio_tail = tail;
223
224                         spin_unlock(&device->io_lock);
225                         btrfs_requeue_work(&device->work);
226                         goto done;
227                 }
228         }
229         if (again)
230                 goto loop;
231 done:
232         return 0;
233 }
234
235 static void pending_bios_fn(struct btrfs_work *work)
236 {
237         struct btrfs_device *device;
238
239         device = container_of(work, struct btrfs_device, work);
240         run_scheduled_bios(device);
241 }
242
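/*
 * called while scanning devices: record a device in the in-memory list
 * for the filesystem named by disk_super->fsid, allocating a new
 * btrfs_fs_devices the first time that fsid is seen.  The device with
 * the highest generation is remembered as the latest one.
 */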
243 static noinline int device_list_add(const char *path,
244                            struct btrfs_super_block *disk_super,
245                            u64 devid, struct btrfs_fs_devices **fs_devices_ret)
246 {
247         struct btrfs_device *device;
248         struct btrfs_fs_devices *fs_devices;
249         u64 found_transid = btrfs_super_generation(disk_super);
250
251         fs_devices = find_fsid(disk_super->fsid);
252         if (!fs_devices) {
253                 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
254                 if (!fs_devices)
255                         return -ENOMEM;
256                 INIT_LIST_HEAD(&fs_devices->devices);
257                 INIT_LIST_HEAD(&fs_devices->alloc_list);
258                 list_add(&fs_devices->list, &fs_uuids);
259                 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
260                 fs_devices->latest_devid = devid;
261                 fs_devices->latest_trans = found_transid;
262                 device = NULL;
263         } else {
264                 device = __find_device(&fs_devices->devices, devid,
265                                        disk_super->dev_item.uuid);
266         }
267         if (!device) {
268                 if (fs_devices->opened)
269                         return -EBUSY;
270
271                 device = kzalloc(sizeof(*device), GFP_NOFS);
272                 if (!device) {
273                         /* we can safely leave the fs_devices entry around */
274                         return -ENOMEM;
275                 }
276                 device->devid = devid;
277                 device->work.func = pending_bios_fn;
278                 memcpy(device->uuid, disk_super->dev_item.uuid,
279                        BTRFS_UUID_SIZE);
280                 device->barriers = 1;
281                 spin_lock_init(&device->io_lock);
282                 device->name = kstrdup(path, GFP_NOFS);
283                 if (!device->name) {
284                         kfree(device);
285                         return -ENOMEM;
286                 }
287                 INIT_LIST_HEAD(&device->dev_alloc_list);
288                 list_add(&device->dev_list, &fs_devices->devices);
289                 device->fs_devices = fs_devices;
290                 fs_devices->num_devices++;
291         }
292
293         if (found_transid > fs_devices->latest_trans) {
294                 fs_devices->latest_devid = devid;
295                 fs_devices->latest_trans = found_transid;
296         }
297         *fs_devices_ret = fs_devices;
298         return 0;
299 }
300
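/*
 * duplicate an fs_devices structure and its list of devices (devid,
 * uuid and name only - no open block devices).  Used by
 * btrfs_prepare_sprout() to keep the original seed filesystem visible
 * under its old fsid.
 */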
301 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
302 {
303         struct btrfs_fs_devices *fs_devices;
304         struct btrfs_device *device;
305         struct btrfs_device *orig_dev;
306
307         fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
308         if (!fs_devices)
309                 return ERR_PTR(-ENOMEM);
310
311         INIT_LIST_HEAD(&fs_devices->devices);
312         INIT_LIST_HEAD(&fs_devices->alloc_list);
313         INIT_LIST_HEAD(&fs_devices->list);
314         fs_devices->latest_devid = orig->latest_devid;
315         fs_devices->latest_trans = orig->latest_trans;
316         memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
317
318         list_for_each_entry(orig_dev, &orig->devices, dev_list) {
319                 device = kzalloc(sizeof(*device), GFP_NOFS);
320                 if (!device)
321                         goto error;
322
323                 device->name = kstrdup(orig_dev->name, GFP_NOFS);
324                 if (!device->name)
325                         goto error;
326
327                 device->devid = orig_dev->devid;
328                 device->work.func = pending_bios_fn;
329                 memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
330                 device->barriers = 1;
331                 spin_lock_init(&device->io_lock);
332                 INIT_LIST_HEAD(&device->dev_list);
333                 INIT_LIST_HEAD(&device->dev_alloc_list);
334
335                 list_add(&device->dev_list, &fs_devices->devices);
336                 device->fs_devices = fs_devices;
337                 fs_devices->num_devices++;
338         }
339         return fs_devices;
340 error:
341         free_fs_devices(fs_devices);
342         return ERR_PTR(-ENOMEM);
343 }
344
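/*
 * throw away any scanned device that is not referenced by the
 * filesystem's metadata (in_fs_metadata is not set): close its block
 * device if it was opened and free the btrfs_device.  Seed filesystems
 * chained through fs_devices->seed are walked as well.
 */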
345 int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
346 {
347         struct list_head *tmp;
348         struct list_head *cur;
349         struct btrfs_device *device;
350
351         mutex_lock(&uuid_mutex);
352 again:
353         list_for_each_safe(cur, tmp, &fs_devices->devices) {
354                 device = list_entry(cur, struct btrfs_device, dev_list);
355                 if (device->in_fs_metadata)
356                         continue;
357
358                 if (device->bdev) {
359                         close_bdev_exclusive(device->bdev, device->mode);
360                         device->bdev = NULL;
361                         fs_devices->open_devices--;
362                 }
363                 if (device->writeable) {
364                         list_del_init(&device->dev_alloc_list);
365                         device->writeable = 0;
366                         fs_devices->rw_devices--;
367                 }
368                 list_del_init(&device->dev_list);
369                 fs_devices->num_devices--;
370                 kfree(device->name);
371                 kfree(device);
372         }
373
374         if (fs_devices->seed) {
375                 fs_devices = fs_devices->seed;
376                 goto again;
377         }
378
379         mutex_unlock(&uuid_mutex);
380         return 0;
381 }
382
383 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
384 {
385         struct list_head *cur;
386         struct btrfs_device *device;
387
388         if (--fs_devices->opened > 0)
389                 return 0;
390
391         list_for_each(cur, &fs_devices->devices) {
392                 device = list_entry(cur, struct btrfs_device, dev_list);
393                 if (device->bdev) {
394                         close_bdev_exclusive(device->bdev, device->mode);
395                         fs_devices->open_devices--;
396                 }
397                 if (device->writeable) {
398                         list_del_init(&device->dev_alloc_list);
399                         fs_devices->rw_devices--;
400                 }
401
402                 device->bdev = NULL;
403                 device->writeable = 0;
404                 device->in_fs_metadata = 0;
405         }
406         WARN_ON(fs_devices->open_devices);
407         WARN_ON(fs_devices->rw_devices);
408         fs_devices->opened = 0;
409         fs_devices->seeding = 0;
410
411         return 0;
412 }
413
414 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
415 {
416         struct btrfs_fs_devices *seed_devices = NULL;
417         int ret;
418
419         mutex_lock(&uuid_mutex);
420         ret = __btrfs_close_devices(fs_devices);
421         if (!fs_devices->opened) {
422                 seed_devices = fs_devices->seed;
423                 fs_devices->seed = NULL;
424         }
425         mutex_unlock(&uuid_mutex);
426
427         while (seed_devices) {
428                 fs_devices = seed_devices;
429                 seed_devices = fs_devices->seed;
430                 __btrfs_close_devices(fs_devices);
431                 free_fs_devices(fs_devices);
432         }
433         return ret;
434 }
435
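/*
 * open every device on the list, verify the superblock's devid and
 * uuid against what was recorded during scanning, and remember the
 * device with the newest generation as latest_bdev.  The fs only stays
 * in seeding mode if every member carries BTRFS_SUPER_FLAG_SEEDING.
 * Returns -EIO if no device could be opened.
 */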
436 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
437                                 fmode_t flags, void *holder)
438 {
439         struct block_device *bdev;
440         struct list_head *head = &fs_devices->devices;
441         struct list_head *cur;
442         struct btrfs_device *device;
443         struct block_device *latest_bdev = NULL;
444         struct buffer_head *bh;
445         struct btrfs_super_block *disk_super;
446         u64 latest_devid = 0;
447         u64 latest_transid = 0;
448         u64 devid;
449         int seeding = 1;
450         int ret = 0;
451
452         list_for_each(cur, head) {
453                 device = list_entry(cur, struct btrfs_device, dev_list);
454                 if (device->bdev)
455                         continue;
456                 if (!device->name)
457                         continue;
458
459                 bdev = open_bdev_exclusive(device->name, flags, holder);
460                 if (IS_ERR(bdev)) {
461                         printk(KERN_INFO "open %s failed\n", device->name);
462                         goto error;
463                 }
464                 set_blocksize(bdev, 4096);
465
466                 bh = btrfs_read_dev_super(bdev);
467                 if (!bh)
468                         goto error_close;
469
470                 disk_super = (struct btrfs_super_block *)bh->b_data;
471                 devid = le64_to_cpu(disk_super->dev_item.devid);
472                 if (devid != device->devid)
473                         goto error_brelse;
474
475                 if (memcmp(device->uuid, disk_super->dev_item.uuid,
476                            BTRFS_UUID_SIZE))
477                         goto error_brelse;
478
479                 device->generation = btrfs_super_generation(disk_super);
480                 if (!latest_transid || device->generation > latest_transid) {
481                         latest_devid = devid;
482                         latest_transid = device->generation;
483                         latest_bdev = bdev;
484                 }
485
486                 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
487                         device->writeable = 0;
488                 } else {
489                         device->writeable = !bdev_read_only(bdev);
490                         seeding = 0;
491                 }
492
493                 device->bdev = bdev;
494                 device->in_fs_metadata = 0;
495                 device->mode = flags;
496
497                 fs_devices->open_devices++;
498                 if (device->writeable) {
499                         fs_devices->rw_devices++;
500                         list_add(&device->dev_alloc_list,
501                                  &fs_devices->alloc_list);
502                 }
503                 continue;
504
505 error_brelse:
506                 brelse(bh);
507 error_close:
508                 close_bdev_exclusive(bdev, FMODE_READ);
509 error:
510                 continue;
511         }
512         if (fs_devices->open_devices == 0) {
513                 ret = -EIO;
514                 goto out;
515         }
516         fs_devices->seeding = seeding;
517         fs_devices->opened = 1;
518         fs_devices->latest_bdev = latest_bdev;
519         fs_devices->latest_devid = latest_devid;
520         fs_devices->latest_trans = latest_transid;
521         fs_devices->total_rw_bytes = 0;
522 out:
523         return ret;
524 }
525
526 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
527                        fmode_t flags, void *holder)
528 {
529         int ret;
530
531         mutex_lock(&uuid_mutex);
532         if (fs_devices->opened) {
533                 fs_devices->opened++;
534                 ret = 0;
535         } else {
536                 ret = __btrfs_open_devices(fs_devices, flags, holder);
537         }
538         mutex_unlock(&uuid_mutex);
539         return ret;
540 }
541
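/*
 * read the superblock from a single device, print what was found and
 * register the device with device_list_add() so the filesystem it
 * belongs to can be assembled later.
 */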
542 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
543                           struct btrfs_fs_devices **fs_devices_ret)
544 {
545         struct btrfs_super_block *disk_super;
546         struct block_device *bdev;
547         struct buffer_head *bh;
548         int ret;
549         u64 devid;
550         u64 transid;
551
552         mutex_lock(&uuid_mutex);
553
554         bdev = open_bdev_exclusive(path, flags, holder);
555
556         if (IS_ERR(bdev)) {
557                 ret = PTR_ERR(bdev);
558                 goto error;
559         }
560
561         ret = set_blocksize(bdev, 4096);
562         if (ret)
563                 goto error_close;
564         bh = btrfs_read_dev_super(bdev);
565         if (!bh) {
566                 ret = -EIO;
567                 goto error_close;
568         }
569         disk_super = (struct btrfs_super_block *)bh->b_data;
570         devid = le64_to_cpu(disk_super->dev_item.devid);
571         transid = btrfs_super_generation(disk_super);
572         if (disk_super->label[0])
573                 printk(KERN_INFO "device label %s ", disk_super->label);
574         else {
575                 /* FIXME, make a real uuid parser */
576                 printk(KERN_INFO "device fsid %llx-%llx ",
577                        *(unsigned long long *)disk_super->fsid,
578                        *(unsigned long long *)(disk_super->fsid + 8));
579         }
580         printk(KERN_INFO "devid %llu transid %llu %s\n",
581                (unsigned long long)devid, (unsigned long long)transid, path);
582         ret = device_list_add(path, disk_super, devid, fs_devices_ret);
583
584         brelse(bh);
585 error_close:
586         close_bdev_exclusive(bdev, flags);
587 error:
588         mutex_unlock(&uuid_mutex);
589         return ret;
590 }
591
592 /*
593  * this uses a pretty simple search, the expectation is that it is
594  * called very infrequently and that a given device has a small number
595  * of extents
596  */
597 static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
598                                          struct btrfs_device *device,
599                                          u64 num_bytes, u64 *start)
600 {
601         struct btrfs_key key;
602         struct btrfs_root *root = device->dev_root;
603         struct btrfs_dev_extent *dev_extent = NULL;
604         struct btrfs_path *path;
605         u64 hole_size = 0;
606         u64 last_byte = 0;
607         u64 search_start = 0;
608         u64 search_end = device->total_bytes;
609         int ret;
610         int slot = 0;
611         int start_found;
612         struct extent_buffer *l;
613
614         path = btrfs_alloc_path();
615         if (!path)
616                 return -ENOMEM;
617         path->reada = 2;
618         start_found = 0;
619
620         /* FIXME use last free of some kind */
621
622         /* we don't want to overwrite the superblock on the drive,
623          * so we make sure to start at an offset of at least 1MB
624          */
625         search_start = max((u64)1024 * 1024, search_start);
626
627         if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
628                 search_start = max(root->fs_info->alloc_start, search_start);
629
630         key.objectid = device->devid;
631         key.offset = search_start;
632         key.type = BTRFS_DEV_EXTENT_KEY;
633         ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
634         if (ret < 0)
635                 goto error;
636         ret = btrfs_previous_item(root, path, 0, key.type);
637         if (ret < 0)
638                 goto error;
639         l = path->nodes[0];
640         btrfs_item_key_to_cpu(l, &key, path->slots[0]);
641         while (1) {
642                 l = path->nodes[0];
643                 slot = path->slots[0];
644                 if (slot >= btrfs_header_nritems(l)) {
645                         ret = btrfs_next_leaf(root, path);
646                         if (ret == 0)
647                                 continue;
648                         if (ret < 0)
649                                 goto error;
650 no_more_items:
651                         if (!start_found) {
652                                 if (search_start >= search_end) {
653                                         ret = -ENOSPC;
654                                         goto error;
655                                 }
656                                 *start = search_start;
657                                 start_found = 1;
658                                 goto check_pending;
659                         }
660                         *start = last_byte > search_start ?
661                                 last_byte : search_start;
662                         if (search_end <= *start) {
663                                 ret = -ENOSPC;
664                                 goto error;
665                         }
666                         goto check_pending;
667                 }
668                 btrfs_item_key_to_cpu(l, &key, slot);
669
670                 if (key.objectid < device->devid)
671                         goto next;
672
673                 if (key.objectid > device->devid)
674                         goto no_more_items;
675
676                 if (key.offset >= search_start && key.offset > last_byte &&
677                     start_found) {
678                         if (last_byte < search_start)
679                                 last_byte = search_start;
680                         hole_size = key.offset - last_byte;
681                         if (key.offset > last_byte &&
682                             hole_size >= num_bytes) {
683                                 *start = last_byte;
684                                 goto check_pending;
685                         }
686                 }
687                 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
688                         goto next;
689
690                 start_found = 1;
691                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
692                 last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
693 next:
694                 path->slots[0]++;
695                 cond_resched();
696         }
697 check_pending:
698         /* we have to make sure we didn't find an extent that has already
699          * been allocated by the map tree or the original allocation
700          */
701         BUG_ON(*start < search_start);
702
703         if (*start + num_bytes > search_end) {
704                 ret = -ENOSPC;
705                 goto error;
706         }
707         /* check for pending inserts here */
708         ret = 0;
709
710 error:
711         btrfs_free_path(path);
712         return ret;
713 }
714
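/*
 * remove the dev extent item covering @start on @device and subtract
 * its length from the device's bytes_used.
 */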
715 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
716                           struct btrfs_device *device,
717                           u64 start)
718 {
719         int ret;
720         struct btrfs_path *path;
721         struct btrfs_root *root = device->dev_root;
722         struct btrfs_key key;
723         struct btrfs_key found_key;
724         struct extent_buffer *leaf = NULL;
725         struct btrfs_dev_extent *extent = NULL;
726
727         path = btrfs_alloc_path();
728         if (!path)
729                 return -ENOMEM;
730
731         key.objectid = device->devid;
732         key.offset = start;
733         key.type = BTRFS_DEV_EXTENT_KEY;
734
735         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
736         if (ret > 0) {
737                 ret = btrfs_previous_item(root, path, key.objectid,
738                                           BTRFS_DEV_EXTENT_KEY);
739                 BUG_ON(ret);
740                 leaf = path->nodes[0];
741                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
742                 extent = btrfs_item_ptr(leaf, path->slots[0],
743                                         struct btrfs_dev_extent);
744                 BUG_ON(found_key.offset > start || found_key.offset +
745                        btrfs_dev_extent_length(leaf, extent) < start);
746                 ret = 0;
747         } else if (ret == 0) {
748                 leaf = path->nodes[0];
749                 extent = btrfs_item_ptr(leaf, path->slots[0],
750                                         struct btrfs_dev_extent);
751         }
752         BUG_ON(ret);
753
754         if (device->bytes_used > 0)
755                 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
756         ret = btrfs_del_item(trans, root, path);
757         BUG_ON(ret);
758
759         btrfs_free_path(path);
760         return ret;
761 }
762
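/*
 * insert a dev extent item recording that the byte range starting at
 * @start for @num_bytes on @device is owned by the chunk at
 * @chunk_offset in @chunk_tree.
 */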
763 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
764                            struct btrfs_device *device,
765                            u64 chunk_tree, u64 chunk_objectid,
766                            u64 chunk_offset, u64 start, u64 num_bytes)
767 {
768         int ret;
769         struct btrfs_path *path;
770         struct btrfs_root *root = device->dev_root;
771         struct btrfs_dev_extent *extent;
772         struct extent_buffer *leaf;
773         struct btrfs_key key;
774
775         WARN_ON(!device->in_fs_metadata);
776         path = btrfs_alloc_path();
777         if (!path)
778                 return -ENOMEM;
779
780         key.objectid = device->devid;
781         key.offset = start;
782         key.type = BTRFS_DEV_EXTENT_KEY;
783         ret = btrfs_insert_empty_item(trans, root, path, &key,
784                                       sizeof(*extent));
785         BUG_ON(ret);
786
787         leaf = path->nodes[0];
788         extent = btrfs_item_ptr(leaf, path->slots[0],
789                                 struct btrfs_dev_extent);
790         btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
791         btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
792         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
793
794         write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
795                     (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
796                     BTRFS_UUID_SIZE);
797
798         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
799         btrfs_mark_buffer_dirty(leaf);
800         btrfs_free_path(path);
801         return ret;
802 }
803
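/*
 * return in *offset the logical offset just past the end of the
 * highest existing chunk item with the given objectid (0 if there are
 * none yet), i.e. where the next chunk can be placed.
 */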
804 static noinline int find_next_chunk(struct btrfs_root *root,
805                                     u64 objectid, u64 *offset)
806 {
807         struct btrfs_path *path;
808         int ret;
809         struct btrfs_key key;
810         struct btrfs_chunk *chunk;
811         struct btrfs_key found_key;
812
813         path = btrfs_alloc_path();
814         BUG_ON(!path);
815
816         key.objectid = objectid;
817         key.offset = (u64)-1;
818         key.type = BTRFS_CHUNK_ITEM_KEY;
819
820         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
821         if (ret < 0)
822                 goto error;
823
824         BUG_ON(ret == 0);
825
826         ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
827         if (ret) {
828                 *offset = 0;
829         } else {
830                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
831                                       path->slots[0]);
832                 if (found_key.objectid != objectid)
833                         *offset = 0;
834                 else {
835                         chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
836                                                struct btrfs_chunk);
837                         *offset = found_key.offset +
838                                 btrfs_chunk_length(path->nodes[0], chunk);
839                 }
840         }
841         ret = 0;
842 error:
843         btrfs_free_path(path);
844         return ret;
845 }
846
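/*
 * pick the next free device id: one more than the highest existing dev
 * item, or 1 for a filesystem that has none yet.
 */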
847 static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
848 {
849         int ret;
850         struct btrfs_key key;
851         struct btrfs_key found_key;
852         struct btrfs_path *path;
853
854         root = root->fs_info->chunk_root;
855
856         path = btrfs_alloc_path();
857         if (!path)
858                 return -ENOMEM;
859
860         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
861         key.type = BTRFS_DEV_ITEM_KEY;
862         key.offset = (u64)-1;
863
864         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
865         if (ret < 0)
866                 goto error;
867
868         BUG_ON(ret == 0);
869
870         ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
871                                   BTRFS_DEV_ITEM_KEY);
872         if (ret) {
873                 *objectid = 1;
874         } else {
875                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
876                                       path->slots[0]);
877                 *objectid = found_key.offset + 1;
878         }
879         ret = 0;
880 error:
881         btrfs_free_path(path);
882         return ret;
883 }
884
885 /*
886  * the device information is stored in the chunk root
887  * the btrfs_device struct should be fully filled in
888  */
889 int btrfs_add_device(struct btrfs_trans_handle *trans,
890                      struct btrfs_root *root,
891                      struct btrfs_device *device)
892 {
893         int ret;
894         struct btrfs_path *path;
895         struct btrfs_dev_item *dev_item;
896         struct extent_buffer *leaf;
897         struct btrfs_key key;
898         unsigned long ptr;
899
900         root = root->fs_info->chunk_root;
901
902         path = btrfs_alloc_path();
903         if (!path)
904                 return -ENOMEM;
905
906         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
907         key.type = BTRFS_DEV_ITEM_KEY;
908         key.offset = device->devid;
909
910         ret = btrfs_insert_empty_item(trans, root, path, &key,
911                                       sizeof(*dev_item));
912         if (ret)
913                 goto out;
914
915         leaf = path->nodes[0];
916         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
917
918         btrfs_set_device_id(leaf, dev_item, device->devid);
919         btrfs_set_device_generation(leaf, dev_item, 0);
920         btrfs_set_device_type(leaf, dev_item, device->type);
921         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
922         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
923         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
924         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
925         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
926         btrfs_set_device_group(leaf, dev_item, 0);
927         btrfs_set_device_seek_speed(leaf, dev_item, 0);
928         btrfs_set_device_bandwidth(leaf, dev_item, 0);
929         btrfs_set_device_start_offset(leaf, dev_item, 0);
930
931         ptr = (unsigned long)btrfs_device_uuid(dev_item);
932         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
933         ptr = (unsigned long)btrfs_device_fsid(dev_item);
934         write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
935         btrfs_mark_buffer_dirty(leaf);
936
937         ret = 0;
938 out:
939         btrfs_free_path(path);
940         return ret;
941 }
942
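/*
 * delete the dev item for @device from the chunk tree.  Runs in its
 * own transaction and takes the chunk mutex around the tree update.
 */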
943 static int btrfs_rm_dev_item(struct btrfs_root *root,
944                              struct btrfs_device *device)
945 {
946         int ret;
947         struct btrfs_path *path;
948         struct btrfs_key key;
949         struct btrfs_trans_handle *trans;
950
951         root = root->fs_info->chunk_root;
952
953         path = btrfs_alloc_path();
954         if (!path)
955                 return -ENOMEM;
956
957         trans = btrfs_start_transaction(root, 1);
958         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
959         key.type = BTRFS_DEV_ITEM_KEY;
960         key.offset = device->devid;
961         lock_chunks(root);
962
963         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
964         if (ret < 0)
965                 goto out;
966
967         if (ret > 0) {
968                 ret = -ENOENT;
969                 goto out;
970         }
971
972         ret = btrfs_del_item(trans, root, path);
973         if (ret)
974                 goto out;
975 out:
976         btrfs_free_path(path);
977         unlock_chunks(root);
978         btrfs_commit_transaction(trans, root);
979         return ret;
980 }
981
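/*
 * remove a device from a mounted filesystem.  @device_path may be the
 * literal string "missing" to pick a device that is referenced by the
 * metadata but could not be opened.  The device is shrunk to zero, its
 * dev item removed and the device counts and superblock updated; where
 * the device is still present, the magic in its on-disk superblock is
 * cleared so it is no longer detected as part of the fs.
 */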
982 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
983 {
984         struct btrfs_device *device;
985         struct btrfs_device *next_device;
986         struct block_device *bdev;
987         struct buffer_head *bh = NULL;
988         struct btrfs_super_block *disk_super;
989         u64 all_avail;
990         u64 devid;
991         u64 num_devices;
992         u8 *dev_uuid;
993         int ret = 0;
994
995         mutex_lock(&uuid_mutex);
996         mutex_lock(&root->fs_info->volume_mutex);
997
998         all_avail = root->fs_info->avail_data_alloc_bits |
999                 root->fs_info->avail_system_alloc_bits |
1000                 root->fs_info->avail_metadata_alloc_bits;
1001
1002         if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
1003             root->fs_info->fs_devices->rw_devices <= 4) {
1004                 printk(KERN_ERR "btrfs: unable to go below four devices "
1005                        "on raid10\n");
1006                 ret = -EINVAL;
1007                 goto out;
1008         }
1009
1010         if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
1011             root->fs_info->fs_devices->rw_devices <= 2) {
1012                 printk(KERN_ERR "btrfs: unable to go below two "
1013                        "devices on raid1\n");
1014                 ret = -EINVAL;
1015                 goto out;
1016         }
1017
1018         if (strcmp(device_path, "missing") == 0) {
1019                 struct list_head *cur;
1020                 struct list_head *devices;
1021                 struct btrfs_device *tmp;
1022
1023                 device = NULL;
1024                 devices = &root->fs_info->fs_devices->devices;
1025                 list_for_each(cur, devices) {
1026                         tmp = list_entry(cur, struct btrfs_device, dev_list);
1027                         if (tmp->in_fs_metadata && !tmp->bdev) {
1028                                 device = tmp;
1029                                 break;
1030                         }
1031                 }
1032                 bdev = NULL;
1033                 bh = NULL;
1034                 disk_super = NULL;
1035                 if (!device) {
1036                         printk(KERN_ERR "btrfs: no missing devices found to "
1037                                "remove\n");
1038                         goto out;
1039                 }
1040         } else {
1041                 bdev = open_bdev_exclusive(device_path, FMODE_READ,
1042                                       root->fs_info->bdev_holder);
1043                 if (IS_ERR(bdev)) {
1044                         ret = PTR_ERR(bdev);
1045                         goto out;
1046                 }
1047
1048                 set_blocksize(bdev, 4096);
1049                 bh = btrfs_read_dev_super(bdev);
1050                 if (!bh) {
1051                         ret = -EIO;
1052                         goto error_close;
1053                 }
1054                 disk_super = (struct btrfs_super_block *)bh->b_data;
1055                 devid = le64_to_cpu(disk_super->dev_item.devid);
1056                 dev_uuid = disk_super->dev_item.uuid;
1057                 device = btrfs_find_device(root, devid, dev_uuid,
1058                                            disk_super->fsid);
1059                 if (!device) {
1060                         ret = -ENOENT;
1061                         goto error_brelse;
1062                 }
1063         }
1064
1065         if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
1066                 printk(KERN_ERR "btrfs: unable to remove the only writeable "
1067                        "device\n");
1068                 ret = -EINVAL;
1069                 goto error_brelse;
1070         }
1071
1072         if (device->writeable) {
1073                 list_del_init(&device->dev_alloc_list);
1074                 root->fs_info->fs_devices->rw_devices--;
1075         }
1076
1077         ret = btrfs_shrink_device(device, 0);
1078         if (ret)
1079                 goto error_brelse;
1080
1081         ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1082         if (ret)
1083                 goto error_brelse;
1084
1085         device->in_fs_metadata = 0;
1086         list_del_init(&device->dev_list);
1087         device->fs_devices->num_devices--;
1088
1089         next_device = list_entry(root->fs_info->fs_devices->devices.next,
1090                                  struct btrfs_device, dev_list);
1091         if (device->bdev == root->fs_info->sb->s_bdev)
1092                 root->fs_info->sb->s_bdev = next_device->bdev;
1093         if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1094                 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1095
1096         if (device->bdev) {
1097                 close_bdev_exclusive(device->bdev, device->mode);
1098                 device->bdev = NULL;
1099                 device->fs_devices->open_devices--;
1100         }
1101
1102         num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1103         btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
1104
1105         if (device->fs_devices->open_devices == 0) {
1106                 struct btrfs_fs_devices *fs_devices;
1107                 fs_devices = root->fs_info->fs_devices;
1108                 while (fs_devices) {
1109                         if (fs_devices->seed == device->fs_devices)
1110                                 break;
1111                         fs_devices = fs_devices->seed;
1112                 }
1113                 fs_devices->seed = device->fs_devices->seed;
1114                 device->fs_devices->seed = NULL;
1115                 __btrfs_close_devices(device->fs_devices);
1116                 free_fs_devices(device->fs_devices);
1117         }
1118
1119         /*
1120          * at this point, the device is zero sized.  We want to
1121          * remove it from the devices list and zero out the old super
1122          */
1123         if (device->writeable) {
1124                 /* make sure this device isn't detected as part of
1125                  * the FS anymore
1126                  */
1127                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
1128                 set_buffer_dirty(bh);
1129                 sync_dirty_buffer(bh);
1130         }
1131
1132         kfree(device->name);
1133         kfree(device);
1134         ret = 0;
1135
1136 error_brelse:
1137         brelse(bh);
1138 error_close:
1139         if (bdev)
1140                 close_bdev_exclusive(bdev, FMODE_READ);
1141 out:
1142         mutex_unlock(&root->fs_info->volume_mutex);
1143         mutex_unlock(&uuid_mutex);
1144         return ret;
1145 }
1146
1147 /*
1148  * does all the dirty work required for changing the file system's UUID.
1149  */
1150 static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
1151                                 struct btrfs_root *root)
1152 {
1153         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
1154         struct btrfs_fs_devices *old_devices;
1155         struct btrfs_fs_devices *seed_devices;
1156         struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
1157         struct btrfs_device *device;
1158         u64 super_flags;
1159
1160         BUG_ON(!mutex_is_locked(&uuid_mutex));
1161         if (!fs_devices->seeding)
1162                 return -EINVAL;
1163
1164         seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
1165         if (!seed_devices)
1166                 return -ENOMEM;
1167
1168         old_devices = clone_fs_devices(fs_devices);
1169         if (IS_ERR(old_devices)) {
1170                 kfree(seed_devices);
1171                 return PTR_ERR(old_devices);
1172         }
1173
1174         list_add(&old_devices->list, &fs_uuids);
1175
1176         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
1177         seed_devices->opened = 1;
1178         INIT_LIST_HEAD(&seed_devices->devices);
1179         INIT_LIST_HEAD(&seed_devices->alloc_list);
1180         list_splice_init(&fs_devices->devices, &seed_devices->devices);
1181         list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
1182         list_for_each_entry(device, &seed_devices->devices, dev_list) {
1183                 device->fs_devices = seed_devices;
1184         }
1185
1186         fs_devices->seeding = 0;
1187         fs_devices->num_devices = 0;
1188         fs_devices->open_devices = 0;
1189         fs_devices->seed = seed_devices;
1190
1191         generate_random_uuid(fs_devices->fsid);
1192         memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1193         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
1194         super_flags = btrfs_super_flags(disk_super) &
1195                       ~BTRFS_SUPER_FLAG_SEEDING;
1196         btrfs_set_super_flags(disk_super, super_flags);
1197
1198         return 0;
1199 }
1200
1201 /*
1202  * store the expected generation for seed devices in device items.
1203  */
1204 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
1205                                struct btrfs_root *root)
1206 {
1207         struct btrfs_path *path;
1208         struct extent_buffer *leaf;
1209         struct btrfs_dev_item *dev_item;
1210         struct btrfs_device *device;
1211         struct btrfs_key key;
1212         u8 fs_uuid[BTRFS_UUID_SIZE];
1213         u8 dev_uuid[BTRFS_UUID_SIZE];
1214         u64 devid;
1215         int ret;
1216
1217         path = btrfs_alloc_path();
1218         if (!path)
1219                 return -ENOMEM;
1220
1221         root = root->fs_info->chunk_root;
1222         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1223         key.offset = 0;
1224         key.type = BTRFS_DEV_ITEM_KEY;
1225
1226         while (1) {
1227                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1228                 if (ret < 0)
1229                         goto error;
1230
1231                 leaf = path->nodes[0];
1232 next_slot:
1233                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1234                         ret = btrfs_next_leaf(root, path);
1235                         if (ret > 0)
1236                                 break;
1237                         if (ret < 0)
1238                                 goto error;
1239                         leaf = path->nodes[0];
1240                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1241                         btrfs_release_path(root, path);
1242                         continue;
1243                 }
1244
1245                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1246                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
1247                     key.type != BTRFS_DEV_ITEM_KEY)
1248                         break;
1249
1250                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
1251                                           struct btrfs_dev_item);
1252                 devid = btrfs_device_id(leaf, dev_item);
1253                 read_extent_buffer(leaf, dev_uuid,
1254                                    (unsigned long)btrfs_device_uuid(dev_item),
1255                                    BTRFS_UUID_SIZE);
1256                 read_extent_buffer(leaf, fs_uuid,
1257                                    (unsigned long)btrfs_device_fsid(dev_item),
1258                                    BTRFS_UUID_SIZE);
1259                 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
1260                 BUG_ON(!device);
1261
1262                 if (device->fs_devices->seeding) {
1263                         btrfs_set_device_generation(leaf, dev_item,
1264                                                     device->generation);
1265                         btrfs_mark_buffer_dirty(leaf);
1266                 }
1267
1268                 path->slots[0]++;
1269                 goto next_slot;
1270         }
1271         ret = 0;
1272 error:
1273         btrfs_free_path(path);
1274         return ret;
1275 }
1276
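/*
 * add a new device to a mounted filesystem.  If the fs currently
 * consists only of seed devices this "sprouts" it: the fsid is
 * replaced via btrfs_prepare_sprout(), the first rw chunks are created
 * on the new device and the system chunks are relocated afterwards.
 */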
1277 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1278 {
1279         struct btrfs_trans_handle *trans;
1280         struct btrfs_device *device;
1281         struct block_device *bdev;
1282         struct list_head *cur;
1283         struct list_head *devices;
1284         struct super_block *sb = root->fs_info->sb;
1285         u64 total_bytes;
1286         int seeding_dev = 0;
1287         int ret = 0;
1288
1289         if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1290                 return -EINVAL;
1291
1292         bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
1293         if (IS_ERR(bdev))
1294                 return PTR_ERR(bdev);
1295
1296         if (root->fs_info->fs_devices->seeding) {
1297                 seeding_dev = 1;
1298                 down_write(&sb->s_umount);
1299                 mutex_lock(&uuid_mutex);
1300         }
1301
1302         filemap_write_and_wait(bdev->bd_inode->i_mapping);
1303         mutex_lock(&root->fs_info->volume_mutex);
1304
1305         devices = &root->fs_info->fs_devices->devices;
1306         list_for_each(cur, devices) {
1307                 device = list_entry(cur, struct btrfs_device, dev_list);
1308                 if (device->bdev == bdev) {
1309                         ret = -EEXIST;
1310                         goto error;
1311                 }
1312         }
1313
1314         device = kzalloc(sizeof(*device), GFP_NOFS);
1315         if (!device) {
1316                 /* we can safely leave the fs_devices entry around */
1317                 ret = -ENOMEM;
1318                 goto error;
1319         }
1320
1321         device->name = kstrdup(device_path, GFP_NOFS);
1322         if (!device->name) {
1323                 kfree(device);
1324                 ret = -ENOMEM;
1325                 goto error;
1326         }
1327
1328         ret = find_next_devid(root, &device->devid);
1329         if (ret) {
1330                 kfree(device);
1331                 goto error;
1332         }
1333
1334         trans = btrfs_start_transaction(root, 1);
1335         lock_chunks(root);
1336
1337         device->barriers = 1;
1338         device->writeable = 1;
1339         device->work.func = pending_bios_fn;
1340         generate_random_uuid(device->uuid);
1341         spin_lock_init(&device->io_lock);
1342         device->generation = trans->transid;
1343         device->io_width = root->sectorsize;
1344         device->io_align = root->sectorsize;
1345         device->sector_size = root->sectorsize;
1346         device->total_bytes = i_size_read(bdev->bd_inode);
1347         device->dev_root = root->fs_info->dev_root;
1348         device->bdev = bdev;
1349         device->in_fs_metadata = 1;
1350         device->mode = 0;
1351         set_blocksize(device->bdev, 4096);
1352
1353         if (seeding_dev) {
1354                 sb->s_flags &= ~MS_RDONLY;
1355                 ret = btrfs_prepare_sprout(trans, root);
1356                 BUG_ON(ret);
1357         }
1358
1359         device->fs_devices = root->fs_info->fs_devices;
1360         list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
1361         list_add(&device->dev_alloc_list,
1362                  &root->fs_info->fs_devices->alloc_list);
1363         root->fs_info->fs_devices->num_devices++;
1364         root->fs_info->fs_devices->open_devices++;
1365         root->fs_info->fs_devices->rw_devices++;
1366         root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1367
1368         total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
1369         btrfs_set_super_total_bytes(&root->fs_info->super_copy,
1370                                     total_bytes + device->total_bytes);
1371
1372         total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
1373         btrfs_set_super_num_devices(&root->fs_info->super_copy,
1374                                     total_bytes + 1);
1375
1376         if (seeding_dev) {
1377                 ret = init_first_rw_device(trans, root, device);
1378                 BUG_ON(ret);
1379                 ret = btrfs_finish_sprout(trans, root);
1380                 BUG_ON(ret);
1381         } else {
1382                 ret = btrfs_add_device(trans, root, device);
1383         }
1384
1385         unlock_chunks(root);
1386         btrfs_commit_transaction(trans, root);
1387
1388         if (seeding_dev) {
1389                 mutex_unlock(&uuid_mutex);
1390                 up_write(&sb->s_umount);
1391
1392                 ret = btrfs_relocate_sys_chunks(root);
1393                 BUG_ON(ret);
1394         }
1395 out:
1396         mutex_unlock(&root->fs_info->volume_mutex);
1397         return ret;
1398 error:
1399         close_bdev_exclusive(bdev, 0);
1400         if (seeding_dev) {
1401                 mutex_unlock(&uuid_mutex);
1402                 up_write(&sb->s_umount);
1403         }
1404         goto out;
1405 }
1406
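/*
 * write the current in-memory fields of @device (sizes, alignment,
 * type) back into its dev item in the chunk tree.
 */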
1407 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1408                                         struct btrfs_device *device)
1409 {
1410         int ret;
1411         struct btrfs_path *path;
1412         struct btrfs_root *root;
1413         struct btrfs_dev_item *dev_item;
1414         struct extent_buffer *leaf;
1415         struct btrfs_key key;
1416
1417         root = device->dev_root->fs_info->chunk_root;
1418
1419         path = btrfs_alloc_path();
1420         if (!path)
1421                 return -ENOMEM;
1422
1423         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1424         key.type = BTRFS_DEV_ITEM_KEY;
1425         key.offset = device->devid;
1426
1427         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1428         if (ret < 0)
1429                 goto out;
1430
1431         if (ret > 0) {
1432                 ret = -ENOENT;
1433                 goto out;
1434         }
1435
1436         leaf = path->nodes[0];
1437         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1438
1439         btrfs_set_device_id(leaf, dev_item, device->devid);
1440         btrfs_set_device_type(leaf, dev_item, device->type);
1441         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1442         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1443         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1444         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1445         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1446         btrfs_mark_buffer_dirty(leaf);
1447
1448 out:
1449         btrfs_free_path(path);
1450         return ret;
1451 }
1452
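/*
 * grow @device to @new_size: bump the superblock's total_bytes and the
 * fs_devices total_rw_bytes by the difference and update the dev item.
 * The caller must hold the chunk mutex; btrfs_grow_device() below
 * takes it.
 */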
1453 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1454                       struct btrfs_device *device, u64 new_size)
1455 {
1456         struct btrfs_super_block *super_copy =
1457                 &device->dev_root->fs_info->super_copy;
1458         u64 old_total = btrfs_super_total_bytes(super_copy);
1459         u64 diff = new_size - device->total_bytes;
1460
1461         if (!device->writeable)
1462                 return -EACCES;
1463         if (new_size <= device->total_bytes)
1464                 return -EINVAL;
1465
1466         btrfs_set_super_total_bytes(super_copy, old_total + diff);
1467         device->fs_devices->total_rw_bytes += diff;
1468
1469         device->total_bytes = new_size;
1470         return btrfs_update_device(trans, device);
1471 }
1472
1473 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1474                       struct btrfs_device *device, u64 new_size)
1475 {
1476         int ret;
1477         lock_chunks(device->dev_root);
1478         ret = __btrfs_grow_device(trans, device, new_size);
1479         unlock_chunks(device->dev_root);
1480         return ret;
1481 }
1482
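/*
 * delete the chunk item at @chunk_offset from the chunk tree.
 */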
1483 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1484                             struct btrfs_root *root,
1485                             u64 chunk_tree, u64 chunk_objectid,
1486                             u64 chunk_offset)
1487 {
1488         int ret;
1489         struct btrfs_path *path;
1490         struct btrfs_key key;
1491
1492         root = root->fs_info->chunk_root;
1493         path = btrfs_alloc_path();
1494         if (!path)
1495                 return -ENOMEM;
1496
1497         key.objectid = chunk_objectid;
1498         key.offset = chunk_offset;
1499         key.type = BTRFS_CHUNK_ITEM_KEY;
1500
1501         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1502         BUG_ON(ret);
1503
1504         ret = btrfs_del_item(trans, root, path);
1505         BUG_ON(ret);
1506
1507         btrfs_free_path(path);
1508         return 0;
1509 }
1510
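/*
 * Remove the copy of a system chunk from the sys_chunk_array in the
 * superblock by sliding the remaining entries over it and shrinking
 * the recorded array size.
 */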
1511 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
1512                                u64 chunk_offset)
1513 {
1514         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1515         struct btrfs_disk_key *disk_key;
1516         struct btrfs_chunk *chunk;
1517         u8 *ptr;
1518         int ret = 0;
1519         u32 num_stripes;
1520         u32 array_size;
1521         u32 len = 0;
1522         u32 cur;
1523         struct btrfs_key key;
1524
1525         array_size = btrfs_super_sys_array_size(super_copy);
1526
1527         ptr = super_copy->sys_chunk_array;
1528         cur = 0;
1529
1530         while (cur < array_size) {
1531                 disk_key = (struct btrfs_disk_key *)ptr;
1532                 btrfs_disk_key_to_cpu(&key, disk_key);
1533
1534                 len = sizeof(*disk_key);
1535
1536                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1537                         chunk = (struct btrfs_chunk *)(ptr + len);
1538                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1539                         len += btrfs_chunk_item_size(num_stripes);
1540                 } else {
1541                         ret = -EIO;
1542                         break;
1543                 }
1544                 if (key.objectid == chunk_objectid &&
1545                     key.offset == chunk_offset) {
1546                         memmove(ptr, ptr + len, array_size - (cur + len));
1547                         array_size -= len;
1548                         btrfs_set_super_sys_array_size(super_copy, array_size);
1549                 } else {
1550                         ptr += len;
1551                         cur += len;
1552                 }
1553         }
1554         return ret;
1555 }
1556
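/*
 * Empty out one chunk and delete it: relocate the extents in its block
 * group, free the device extents backing each stripe, remove the chunk
 * item (and its superblock copy for system chunks), drop the block
 * group and finally the cached extent mapping.
 */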
1557 static int btrfs_relocate_chunk(struct btrfs_root *root,
1558                          u64 chunk_tree, u64 chunk_objectid,
1559                          u64 chunk_offset)
1560 {
1561         struct extent_map_tree *em_tree;
1562         struct btrfs_root *extent_root;
1563         struct btrfs_trans_handle *trans;
1564         struct extent_map *em;
1565         struct map_lookup *map;
1566         int ret;
1567         int i;
1568
1569         printk(KERN_INFO "btrfs relocating chunk %llu\n",
1570                (unsigned long long)chunk_offset);
1571         root = root->fs_info->chunk_root;
1572         extent_root = root->fs_info->extent_root;
1573         em_tree = &root->fs_info->mapping_tree.map_tree;
1574
1575         /* step one, relocate all the extents inside this chunk */
1576         ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1577         BUG_ON(ret);
1578
1579         trans = btrfs_start_transaction(root, 1);
1580         BUG_ON(!trans);
1581
1582         lock_chunks(root);
1583
1584         /*
1585          * step two, delete the device extents and the
1586          * chunk tree entries
1587          */
1588         spin_lock(&em_tree->lock);
1589         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1590         spin_unlock(&em_tree->lock);
1591
1592         BUG_ON(em->start > chunk_offset ||
1593                em->start + em->len < chunk_offset);
1594         map = (struct map_lookup *)em->bdev;
1595
1596         for (i = 0; i < map->num_stripes; i++) {
1597                 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1598                                             map->stripes[i].physical);
1599                 BUG_ON(ret);
1600
1601                 if (map->stripes[i].dev) {
1602                         ret = btrfs_update_device(trans, map->stripes[i].dev);
1603                         BUG_ON(ret);
1604                 }
1605         }
1606         ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1607                                chunk_offset);
1608
1609         BUG_ON(ret);
1610
1611         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1612                 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1613                 BUG_ON(ret);
1614         }
1615
1616         ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
1617         BUG_ON(ret);
1618
1619         spin_lock(&em_tree->lock);
1620         remove_extent_mapping(em_tree, em);
1621         spin_unlock(&em_tree->lock);
1622
1623         kfree(map);
1624         em->bdev = NULL;
1625
1626         /* once for the tree */
1627         free_extent_map(em);
1628         /* once for us */
1629         free_extent_map(em);
1630
1631         unlock_chunks(root);
1632         btrfs_end_transaction(trans, root);
1633         return 0;
1634 }
1635
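/*
 * Walk the chunk tree backwards and relocate every SYSTEM chunk.  Called
 * after a writeable device has been added to a seeding filesystem so that
 * the system chunks are re-created on writeable storage.
 */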
1636 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
1637 {
1638         struct btrfs_root *chunk_root = root->fs_info->chunk_root;
1639         struct btrfs_path *path;
1640         struct extent_buffer *leaf;
1641         struct btrfs_chunk *chunk;
1642         struct btrfs_key key;
1643         struct btrfs_key found_key;
1644         u64 chunk_tree = chunk_root->root_key.objectid;
1645         u64 chunk_type;
1646         int ret;
1647
1648         path = btrfs_alloc_path();
1649         if (!path)
1650                 return -ENOMEM;
1651
1652         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1653         key.offset = (u64)-1;
1654         key.type = BTRFS_CHUNK_ITEM_KEY;
1655
1656         while (1) {
1657                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1658                 if (ret < 0)
1659                         goto error;
1660                 BUG_ON(ret == 0);
1661
1662                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
1663                                           key.type);
1664                 if (ret < 0)
1665                         goto error;
1666                 if (ret > 0)
1667                         break;
1668
1669                 leaf = path->nodes[0];
1670                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1671
1672                 chunk = btrfs_item_ptr(leaf, path->slots[0],
1673                                        struct btrfs_chunk);
1674                 chunk_type = btrfs_chunk_type(leaf, chunk);
1675                 btrfs_release_path(chunk_root, path);
1676
1677                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
1678                         ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
1679                                                    found_key.objectid,
1680                                                    found_key.offset);
1681                         BUG_ON(ret);
1682                 }
1683
1684                 if (found_key.offset == 0)
1685                         break;
1686                 key.offset = found_key.offset - 1;
1687         }
1688         ret = 0;
1689 error:
1690         btrfs_free_path(path);
1691         return ret;
1692 }
1693
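/* scale @num by @factor tenths; a factor of 10 leaves it unchanged */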
1694 static u64 div_factor(u64 num, int factor)
1695 {
1696         if (factor == 10)
1697                 return num;
1698         num *= factor;
1699         do_div(num, 10);
1700         return num;
1701 }
1702
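/*
 * Balance the filesystem: first shrink and immediately re-grow every
 * writeable device to leave some free room on each, then walk the chunk
 * tree from the end and relocate each chunk in turn so it is reallocated
 * across the current set of devices.
 */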
1703 int btrfs_balance(struct btrfs_root *dev_root)
1704 {
1705         int ret;
1706         struct list_head *cur;
1707         struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1708         struct btrfs_device *device;
1709         u64 old_size;
1710         u64 size_to_free;
1711         struct btrfs_path *path;
1712         struct btrfs_key key;
1713         struct btrfs_chunk *chunk;
1714         struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
1715         struct btrfs_trans_handle *trans;
1716         struct btrfs_key found_key;
1717
1718         if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
1719                 return -EROFS;
1720
1721         mutex_lock(&dev_root->fs_info->volume_mutex);
1722         dev_root = dev_root->fs_info->dev_root;
1723
1724         /* step one, make some room on all the devices */
1725         list_for_each(cur, devices) {
1726                 device = list_entry(cur, struct btrfs_device, dev_list);
1727                 old_size = device->total_bytes;
1728                 size_to_free = div_factor(old_size, 1);
1729                 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
1730                 if (!device->writeable ||
1731                     device->total_bytes - device->bytes_used > size_to_free)
1732                         continue;
1733
1734                 ret = btrfs_shrink_device(device, old_size - size_to_free);
1735                 BUG_ON(ret);
1736
1737                 trans = btrfs_start_transaction(dev_root, 1);
1738                 BUG_ON(!trans);
1739
1740                 ret = btrfs_grow_device(trans, device, old_size);
1741                 BUG_ON(ret);
1742
1743                 btrfs_end_transaction(trans, dev_root);
1744         }
1745
1746         /* step two, relocate all the chunks */
1747         path = btrfs_alloc_path();
1748         BUG_ON(!path);
1749
1750         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1751         key.offset = (u64)-1;
1752         key.type = BTRFS_CHUNK_ITEM_KEY;
1753
1754         while (1) {
1755                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1756                 if (ret < 0)
1757                         goto error;
1758
1759                 /*
1760                  * this shouldn't happen; it means the last relocate
1761                  * failed
1762                  */
1763                 if (ret == 0)
1764                         break;
1765
1766                 ret = btrfs_previous_item(chunk_root, path, 0,
1767                                           BTRFS_CHUNK_ITEM_KEY);
1768                 if (ret)
1769                         break;
1770
1771                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1772                                       path->slots[0]);
1773                 if (found_key.objectid != key.objectid)
1774                         break;
1775
1776                 chunk = btrfs_item_ptr(path->nodes[0],
1777                                        path->slots[0],
1778                                        struct btrfs_chunk);
1779                 key.offset = found_key.offset;
1780                 /* chunk zero is special */
1781                 if (key.offset == 0)
1782                         break;
1783
1784                 btrfs_release_path(chunk_root, path);
1785                 ret = btrfs_relocate_chunk(chunk_root,
1786                                            chunk_root->root_key.objectid,
1787                                            found_key.objectid,
1788                                            found_key.offset);
1789                 BUG_ON(ret);
1790         }
1791         ret = 0;
1792 error:
1793         btrfs_free_path(path);
1794         mutex_unlock(&dev_root->fs_info->volume_mutex);
1795         return ret;
1796 }
1797
1798 /*
1799  * shrinking a device means finding all of the device extents past
1800  * the new size, and then following the back refs to the chunks.
1801  * The chunk relocation code actually frees the device extents.
1802  */
1803 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
1804 {
1805         struct btrfs_trans_handle *trans;
1806         struct btrfs_root *root = device->dev_root;
1807         struct btrfs_dev_extent *dev_extent = NULL;
1808         struct btrfs_path *path;
1809         u64 length;
1810         u64 chunk_tree;
1811         u64 chunk_objectid;
1812         u64 chunk_offset;
1813         int ret;
1814         int slot;
1815         struct extent_buffer *l;
1816         struct btrfs_key key;
1817         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1818         u64 old_total = btrfs_super_total_bytes(super_copy);
1819         u64 diff = device->total_bytes - new_size;
1820
1821         if (new_size >= device->total_bytes)
1822                 return -EINVAL;
1823
1824         path = btrfs_alloc_path();
1825         if (!path)
1826                 return -ENOMEM;
1827
1828         trans = btrfs_start_transaction(root, 1);
1829         if (!trans) {
1830                 ret = -ENOMEM;
1831                 goto done;
1832         }
1833
1834         path->reada = 2;
1835
1836         lock_chunks(root);
1837
1838         device->total_bytes = new_size;
1839         if (device->writeable)
1840                 device->fs_devices->total_rw_bytes -= diff;
1841         ret = btrfs_update_device(trans, device);
1842         if (ret) {
1843                 unlock_chunks(root);
1844                 btrfs_end_transaction(trans, root);
1845                 goto done;
1846         }
1847         WARN_ON(diff > old_total);
1848         btrfs_set_super_total_bytes(super_copy, old_total - diff);
1849         unlock_chunks(root);
1850         btrfs_end_transaction(trans, root);
1851
1852         key.objectid = device->devid;
1853         key.offset = (u64)-1;
1854         key.type = BTRFS_DEV_EXTENT_KEY;
1855
1856         while (1) {
1857                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1858                 if (ret < 0)
1859                         goto done;
1860
1861                 ret = btrfs_previous_item(root, path, 0, key.type);
1862                 if (ret < 0)
1863                         goto done;
1864                 if (ret) {
1865                         ret = 0;
1866                         goto done;
1867                 }
1868
1869                 l = path->nodes[0];
1870                 slot = path->slots[0];
1871                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1872
1873                 if (key.objectid != device->devid)
1874                         goto done;
1875
1876                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1877                 length = btrfs_dev_extent_length(l, dev_extent);
1878
1879                 if (key.offset + length <= new_size)
1880                         goto done;
1881
1882                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1883                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1884                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1885                 btrfs_release_path(root, path);
1886
1887                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
1888                                            chunk_offset);
1889                 if (ret)
1890                         goto done;
1891         }
1892
1893 done:
1894         btrfs_free_path(path);
1895         return ret;
1896 }
1897
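/*
 * Append a (key, chunk item) pair to the superblock's sys_chunk_array,
 * which lets system chunks be mapped before the chunk tree itself can be
 * read.  Fails with -EFBIG once the array is full.
 */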
1898 static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
1899                            struct btrfs_root *root,
1900                            struct btrfs_key *key,
1901                            struct btrfs_chunk *chunk, int item_size)
1902 {
1903         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1904         struct btrfs_disk_key disk_key;
1905         u32 array_size;
1906         u8 *ptr;
1907
1908         array_size = btrfs_super_sys_array_size(super_copy);
1909         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
1910                 return -EFBIG;
1911
1912         ptr = super_copy->sys_chunk_array + array_size;
1913         btrfs_cpu_key_to_disk(&disk_key, key);
1914         memcpy(ptr, &disk_key, sizeof(disk_key));
1915         ptr += sizeof(disk_key);
1916         memcpy(ptr, chunk, item_size);
1917         item_size += sizeof(disk_key);
1918         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
1919         return 0;
1920 }
1921
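/*
 * Logical size of a chunk given the per-device stripe size: mirrored
 * profiles (RAID1/DUP) expose a single stripe, RAID10 exposes one copy
 * (num_stripes / sub_stripes), everything else exposes all stripes.
 */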
1922 static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
1923                                         int num_stripes, int sub_stripes)
1924 {
1925         if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
1926                 return calc_size;
1927         else if (type & BTRFS_BLOCK_GROUP_RAID10)
1928                 return calc_size * (num_stripes / sub_stripes);
1929         else
1930                 return calc_size * num_stripes;
1931 }
1932
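/*
 * First half of chunk allocation: pick a stripe size and a set of devices
 * for a new chunk of @type, reserve a device extent on each of them and
 * insert the extent mapping and block group.  The chunk item itself is
 * written later by __finish_chunk_alloc().
 */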
1933 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
1934                                struct btrfs_root *extent_root,
1935                                struct map_lookup **map_ret,
1936                                u64 *num_bytes, u64 *stripe_size,
1937                                u64 start, u64 type)
1938 {
1939         struct btrfs_fs_info *info = extent_root->fs_info;
1940         struct btrfs_device *device = NULL;
1941         struct btrfs_fs_devices *fs_devices = info->fs_devices;
1942         struct list_head *cur;
1943         struct map_lookup *map = NULL;
1944         struct extent_map_tree *em_tree;
1945         struct extent_map *em;
1946         struct list_head private_devs;
1947         int min_stripe_size = 1 * 1024 * 1024;
1948         u64 calc_size = 1024 * 1024 * 1024;
1949         u64 max_chunk_size = calc_size;
1950         u64 min_free;
1951         u64 avail;
1952         u64 max_avail = 0;
1953         u64 dev_offset;
1954         int num_stripes = 1;
1955         int min_stripes = 1;
1956         int sub_stripes = 0;
1957         int looped = 0;
1958         int ret;
1959         int index;
1960         int stripe_len = 64 * 1024;
1961
1962         if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
1963             (type & BTRFS_BLOCK_GROUP_DUP)) {
1964                 WARN_ON(1);
1965                 type &= ~BTRFS_BLOCK_GROUP_DUP;
1966         }
1967         if (list_empty(&fs_devices->alloc_list))
1968                 return -ENOSPC;
1969
1970         if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
1971                 num_stripes = fs_devices->rw_devices;
1972                 min_stripes = 2;
1973         }
1974         if (type & (BTRFS_BLOCK_GROUP_DUP)) {
1975                 num_stripes = 2;
1976                 min_stripes = 2;
1977         }
1978         if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
1979                 num_stripes = min_t(u64, 2, fs_devices->rw_devices);
1980                 if (num_stripes < 2)
1981                         return -ENOSPC;
1982                 min_stripes = 2;
1983         }
1984         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1985                 num_stripes = fs_devices->rw_devices;
1986                 if (num_stripes < 4)
1987                         return -ENOSPC;
1988                 num_stripes &= ~(u32)1;
1989                 sub_stripes = 2;
1990                 min_stripes = 4;
1991         }
1992
1993         if (type & BTRFS_BLOCK_GROUP_DATA) {
1994                 max_chunk_size = 10 * calc_size;
1995                 min_stripe_size = 64 * 1024 * 1024;
1996         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
1997                 max_chunk_size = 4 * calc_size;
1998                 min_stripe_size = 32 * 1024 * 1024;
1999         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
2000                 calc_size = 8 * 1024 * 1024;
2001                 max_chunk_size = calc_size * 2;
2002                 min_stripe_size = 1 * 1024 * 1024;
2003         }
2004
2005         /* we don't want a chunk larger than 10% of writeable space */
2006         max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
2007                              max_chunk_size);
2008
2009 again:
2010         if (!map || map->num_stripes != num_stripes) {
2011                 kfree(map);
2012                 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2013                 if (!map)
2014                         return -ENOMEM;
2015                 map->num_stripes = num_stripes;
2016         }
2017
2018         if (calc_size * num_stripes > max_chunk_size) {
2019                 calc_size = max_chunk_size;
2020                 do_div(calc_size, num_stripes);
2021                 do_div(calc_size, stripe_len);
2022                 calc_size *= stripe_len;
2023         }
2024         /* we don't want tiny stripes */
2025         calc_size = max_t(u64, min_stripe_size, calc_size);
2026
2027         do_div(calc_size, stripe_len);
2028         calc_size *= stripe_len;
2029
2030         cur = fs_devices->alloc_list.next;
2031         index = 0;
2032
2033         if (type & BTRFS_BLOCK_GROUP_DUP)
2034                 min_free = calc_size * 2;
2035         else
2036                 min_free = calc_size;
2037
2038         /*
2039          * we add 1MB because we never use the first 1MB of the device; if
2040          * we've already looped, we are likely allocating the maximum amount
2041          * of space left anyway, so skip the extra slack
2042          */
2043         if (!looped)
2044                 min_free += 1024 * 1024;
2045
2046         INIT_LIST_HEAD(&private_devs);
2047         while (index < num_stripes) {
2048                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
2049                 BUG_ON(!device->writeable);
2050                 if (device->total_bytes > device->bytes_used)
2051                         avail = device->total_bytes - device->bytes_used;
2052                 else
2053                         avail = 0;
2054                 cur = cur->next;
2055
2056                 if (device->in_fs_metadata && avail >= min_free) {
2057                         ret = find_free_dev_extent(trans, device,
2058                                                    min_free, &dev_offset);
2059                         if (ret == 0) {
2060                                 list_move_tail(&device->dev_alloc_list,
2061                                                &private_devs);
2062                                 map->stripes[index].dev = device;
2063                                 map->stripes[index].physical = dev_offset;
2064                                 index++;
2065                                 if (type & BTRFS_BLOCK_GROUP_DUP) {
2066                                         map->stripes[index].dev = device;
2067                                         map->stripes[index].physical =
2068                                                 dev_offset + calc_size;
2069                                         index++;
2070                                 }
2071                         }
2072                 } else if (device->in_fs_metadata && avail > max_avail)
2073                         max_avail = avail;
2074                 if (cur == &fs_devices->alloc_list)
2075                         break;
2076         }
2077         list_splice(&private_devs, &fs_devices->alloc_list);
2078         if (index < num_stripes) {
2079                 if (index >= min_stripes) {
2080                         num_stripes = index;
2081                         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
2082                                 num_stripes /= sub_stripes;
2083                                 num_stripes *= sub_stripes;
2084                         }
2085                         looped = 1;
2086                         goto again;
2087                 }
2088                 if (!looped && max_avail > 0) {
2089                         looped = 1;
2090                         calc_size = max_avail;
2091                         goto again;
2092                 }
2093                 kfree(map);
2094                 return -ENOSPC;
2095         }
2096         map->sector_size = extent_root->sectorsize;
2097         map->stripe_len = stripe_len;
2098         map->io_align = stripe_len;
2099         map->io_width = stripe_len;
2100         map->type = type;
2101         map->num_stripes = num_stripes;
2102         map->sub_stripes = sub_stripes;
2103
2104         *map_ret = map;
2105         *stripe_size = calc_size;
2106         *num_bytes = chunk_bytes_by_type(type, calc_size,
2107                                          num_stripes, sub_stripes);
2108
2109         em = alloc_extent_map(GFP_NOFS);
2110         if (!em) {
2111                 kfree(map);
2112                 return -ENOMEM;
2113         }
2114         em->bdev = (struct block_device *)map;
2115         em->start = start;
2116         em->len = *num_bytes;
2117         em->block_start = 0;
2118         em->block_len = em->len;
2119
2120         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
2121         spin_lock(&em_tree->lock);
2122         ret = add_extent_mapping(em_tree, em);
2123         spin_unlock(&em_tree->lock);
2124         BUG_ON(ret);
2125         free_extent_map(em);
2126
2127         ret = btrfs_make_block_group(trans, extent_root, 0, type,
2128                                      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2129                                      start, *num_bytes);
2130         BUG_ON(ret);
2131
2132         index = 0;
2133         while (index < map->num_stripes) {
2134                 device = map->stripes[index].dev;
2135                 dev_offset = map->stripes[index].physical;
2136
2137                 ret = btrfs_alloc_dev_extent(trans, device,
2138                                 info->chunk_root->root_key.objectid,
2139                                 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2140                                 start, dev_offset, calc_size);
2141                 BUG_ON(ret);
2142                 index++;
2143         }
2144
2145         return 0;
2146 }
2147
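/*
 * Second half of chunk allocation: account the new stripes in each
 * device's bytes_used, build the on-disk chunk item from the map_lookup
 * and insert it into the chunk tree (and into the superblock array for
 * system chunks).
 */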
2148 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
2149                                 struct btrfs_root *extent_root,
2150                                 struct map_lookup *map, u64 chunk_offset,
2151                                 u64 chunk_size, u64 stripe_size)
2152 {
2153         u64 dev_offset;
2154         struct btrfs_key key;
2155         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2156         struct btrfs_device *device;
2157         struct btrfs_chunk *chunk;
2158         struct btrfs_stripe *stripe;
2159         size_t item_size = btrfs_chunk_item_size(map->num_stripes);
2160         int index = 0;
2161         int ret;
2162
2163         chunk = kzalloc(item_size, GFP_NOFS);
2164         if (!chunk)
2165                 return -ENOMEM;
2166
2167         index = 0;
2168         while (index < map->num_stripes) {
2169                 device = map->stripes[index].dev;
2170                 device->bytes_used += stripe_size;
2171                 ret = btrfs_update_device(trans, device);
2172                 BUG_ON(ret);
2173                 index++;
2174         }
2175
2176         index = 0;
2177         stripe = &chunk->stripe;
2178         while (index < map->num_stripes) {
2179                 device = map->stripes[index].dev;
2180                 dev_offset = map->stripes[index].physical;
2181
2182                 btrfs_set_stack_stripe_devid(stripe, device->devid);
2183                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
2184                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
2185                 stripe++;
2186                 index++;
2187         }
2188
2189         btrfs_set_stack_chunk_length(chunk, chunk_size);
2190         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
2191         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
2192         btrfs_set_stack_chunk_type(chunk, map->type);
2193         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
2194         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
2195         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
2196         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
2197         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
2198
2199         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2200         key.type = BTRFS_CHUNK_ITEM_KEY;
2201         key.offset = chunk_offset;
2202
2203         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
2204         BUG_ON(ret);
2205
2206         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2207                 ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
2208                                              item_size);
2209                 BUG_ON(ret);
2210         }
2211         kfree(chunk);
2212         return 0;
2213 }
2214
2215 /*
2216  * Chunk allocation falls into two parts. The first part does the work
2217  * that makes the newly allocated chunk usable, but does not do any
2218  * operation that modifies the chunk tree. The second part does the work
2219  * that requires modifying the chunk tree. This division is important
2220  * for the bootstrap process of adding storage to a seed btrfs.
2221  */
2222 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
2223                       struct btrfs_root *extent_root, u64 type)
2224 {
2225         u64 chunk_offset;
2226         u64 chunk_size;
2227         u64 stripe_size;
2228         struct map_lookup *map;
2229         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
2230         int ret;
2231
2232         ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
2233                               &chunk_offset);
2234         if (ret)
2235                 return ret;
2236
2237         ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2238                                   &stripe_size, chunk_offset, type);
2239         if (ret)
2240                 return ret;
2241
2242         ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2243                                    chunk_size, stripe_size);
2244         BUG_ON(ret);
2245         return 0;
2246 }
2247
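/*
 * Bootstrap the first writeable device of a seed filesystem: allocate a
 * fresh metadata chunk and a fresh system chunk on it, record the new
 * device, and only then insert the chunk items, since writing the chunk
 * tree needs both block groups to exist.
 */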
2248 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
2249                                          struct btrfs_root *root,
2250                                          struct btrfs_device *device)
2251 {
2252         u64 chunk_offset;
2253         u64 sys_chunk_offset;
2254         u64 chunk_size;
2255         u64 sys_chunk_size;
2256         u64 stripe_size;
2257         u64 sys_stripe_size;
2258         u64 alloc_profile;
2259         struct map_lookup *map;
2260         struct map_lookup *sys_map;
2261         struct btrfs_fs_info *fs_info = root->fs_info;
2262         struct btrfs_root *extent_root = fs_info->extent_root;
2263         int ret;
2264
2265         ret = find_next_chunk(fs_info->chunk_root,
2266                               BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
2267         BUG_ON(ret);
2268
2269         alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
2270                         (fs_info->metadata_alloc_profile &
2271                          fs_info->avail_metadata_alloc_bits);
2272         alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2273
2274         ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
2275                                   &stripe_size, chunk_offset, alloc_profile);
2276         BUG_ON(ret);
2277
2278         sys_chunk_offset = chunk_offset + chunk_size;
2279
2280         alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
2281                         (fs_info->system_alloc_profile &
2282                          fs_info->avail_system_alloc_bits);
2283         alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
2284
2285         ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
2286                                   &sys_chunk_size, &sys_stripe_size,
2287                                   sys_chunk_offset, alloc_profile);
2288         BUG_ON(ret);
2289
2290         ret = btrfs_add_device(trans, fs_info->chunk_root, device);
2291         BUG_ON(ret);
2292
2293         /*
2294          * Modifying the chunk tree requires allocating new blocks from both
2295          * the system block group and the metadata block group, so we can
2296          * only do operations that modify the chunk tree after both block
2297          * groups have been created.
2298          */
2299         ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
2300                                    chunk_size, stripe_size);
2301         BUG_ON(ret);
2302
2303         ret = __finish_chunk_alloc(trans, extent_root, sys_map,
2304                                    sys_chunk_offset, sys_chunk_size,
2305                                    sys_stripe_size);
2306         BUG_ON(ret);
2307         return 0;
2308 }
2309
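/*
 * A chunk is considered read-only if any of its stripes sits on a device
 * we cannot write to, or if it has no mapping at all.
 */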
2310 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
2311 {
2312         struct extent_map *em;
2313         struct map_lookup *map;
2314         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2315         int readonly = 0;
2316         int i;
2317
2318         spin_lock(&map_tree->map_tree.lock);
2319         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2320         spin_unlock(&map_tree->map_tree.lock);
2321         if (!em)
2322                 return 1;
2323
2324         map = (struct map_lookup *)em->bdev;
2325         for (i = 0; i < map->num_stripes; i++) {
2326                 if (!map->stripes[i].dev->writeable) {
2327                         readonly = 1;
2328                         break;
2329                 }
2330         }
2331         free_extent_map(em);
2332         return readonly;
2333 }
2334
2335 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
2336 {
2337         extent_map_tree_init(&tree->map_tree, GFP_NOFS);
2338 }
2339
2340 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
2341 {
2342         struct extent_map *em;
2343
2344         while (1) {
2345                 spin_lock(&tree->map_tree.lock);
2346                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
2347                 if (em)
2348                         remove_extent_mapping(&tree->map_tree, em);
2349                 spin_unlock(&tree->map_tree.lock);
2350                 if (!em)
2351                         break;
2352                 kfree(em->bdev);
2353                 /* once for us */
2354                 free_extent_map(em);
2355                 /* once for the tree */
2356                 free_extent_map(em);
2357         }
2358 }
2359
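/*
 * Number of copies of the data at @logical, i.e. how many mirrors can be
 * tried when a read fails.
 */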
2360 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
2361 {
2362         struct extent_map *em;
2363         struct map_lookup *map;
2364         struct extent_map_tree *em_tree = &map_tree->map_tree;
2365         int ret;
2366
2367         spin_lock(&em_tree->lock);
2368         em = lookup_extent_mapping(em_tree, logical, len);
2369         spin_unlock(&em_tree->lock);
2370         BUG_ON(!em);
2371
2372         BUG_ON(em->start > logical || em->start + em->len < logical);
2373         map = (struct map_lookup *)em->bdev;
2374         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
2375                 ret = map->num_stripes;
2376         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2377                 ret = map->sub_stripes;
2378         else
2379                 ret = 1;
2380         free_extent_map(em);
2381         return ret;
2382 }
2383
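/*
 * Prefer the @optimal stripe if its device is present, otherwise return
 * the first stripe in [first, first + num) that still has a block device.
 */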
2384 static int find_live_mirror(struct map_lookup *map, int first, int num,
2385                             int optimal)
2386 {
2387         int i;
2388         if (map->stripes[optimal].dev->bdev)
2389                 return optimal;
2390         for (i = first; i < first + num; i++) {
2391                 if (map->stripes[i].dev->bdev)
2392                         return i;
2393         }
2394         /* we couldn't find a mirror that is still present.  Just return
2395          * something and the io error handling code will clean up eventually
2396          */
2397         return optimal;
2398 }
2399
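/*
 * Map a logical address onto the physical stripes that back it.  Writes
 * get every mirror, reads get a single live mirror.  When @unplug_page is
 * set the mapping is only used to unplug the backing devices.
 */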
2400 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2401                              u64 logical, u64 *length,
2402                              struct btrfs_multi_bio **multi_ret,
2403                              int mirror_num, struct page *unplug_page)
2404 {
2405         struct extent_map *em;
2406         struct map_lookup *map;
2407         struct extent_map_tree *em_tree = &map_tree->map_tree;
2408         u64 offset;
2409         u64 stripe_offset;
2410         u64 stripe_nr;
2411         int stripes_allocated = 8;
2412         int stripes_required = 1;
2413         int stripe_index;
2414         int i;
2415         int num_stripes;
2416         int max_errors = 0;
2417         struct btrfs_multi_bio *multi = NULL;
2418
2419         if (multi_ret && !(rw & (1 << BIO_RW)))
2420                 stripes_allocated = 1;
2421 again:
2422         if (multi_ret) {
2423                 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
2424                                 GFP_NOFS);
2425                 if (!multi)
2426                         return -ENOMEM;
2427
2428                 atomic_set(&multi->error, 0);
2429         }
2430
2431         spin_lock(&em_tree->lock);
2432         em = lookup_extent_mapping(em_tree, logical, *length);
2433         spin_unlock(&em_tree->lock);
2434
2435         if (!em && unplug_page)
2436                 return 0;
2437
2438         if (!em) {
2439                 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
2440                        (unsigned long long)logical,
2441                        (unsigned long long)*length);
2442                 BUG();
2443         }
2444
2445         BUG_ON(em->start > logical || em->start + em->len < logical);
2446         map = (struct map_lookup *)em->bdev;
2447         offset = logical - em->start;
2448
2449         if (mirror_num > map->num_stripes)
2450                 mirror_num = 0;
2451
2452         /* if our multi bio struct is too small, back off and try again */
2453         if (rw & (1 << BIO_RW)) {
2454                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
2455                                  BTRFS_BLOCK_GROUP_DUP)) {
2456                         stripes_required = map->num_stripes;
2457                         max_errors = 1;
2458                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2459                         stripes_required = map->sub_stripes;
2460                         max_errors = 1;
2461                 }
2462         }
2463         if (multi_ret && rw == WRITE &&
2464             stripes_allocated < stripes_required) {
2465                 stripes_allocated = map->num_stripes;
2466                 free_extent_map(em);
2467                 kfree(multi);
2468                 goto again;
2469         }
2470         stripe_nr = offset;
2471         /*
2472          * stripe_nr counts the total number of stripes we have to stride
2473          * to get to this block
2474          */
2475         do_div(stripe_nr, map->stripe_len);
2476
2477         stripe_offset = stripe_nr * map->stripe_len;
2478         BUG_ON(offset < stripe_offset);
2479
2480         /* stripe_offset is the offset of this block in its stripe */
2481         stripe_offset = offset - stripe_offset;
2482
2483         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2484                          BTRFS_BLOCK_GROUP_RAID10 |
2485                          BTRFS_BLOCK_GROUP_DUP)) {
2486                 /* we limit the length of each bio to what fits in a stripe */
2487                 *length = min_t(u64, em->len - offset,
2488                               map->stripe_len - stripe_offset);
2489         } else {
2490                 *length = em->len - offset;
2491         }
2492
2493         if (!multi_ret && !unplug_page)
2494                 goto out;
2495
2496         num_stripes = 1;
2497         stripe_index = 0;
2498         if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
2499                 if (unplug_page || (rw & (1 << BIO_RW)))
2500                         num_stripes = map->num_stripes;
2501                 else if (mirror_num)
2502                         stripe_index = mirror_num - 1;
2503                 else {
2504                         stripe_index = find_live_mirror(map, 0,
2505                                             map->num_stripes,
2506                                             current->pid % map->num_stripes);
2507                 }
2508
2509         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2510                 if (rw & (1 << BIO_RW))
2511                         num_stripes = map->num_stripes;
2512                 else if (mirror_num)
2513                         stripe_index = mirror_num - 1;
2514
2515         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2516                 int factor = map->num_stripes / map->sub_stripes;
2517
2518                 stripe_index = do_div(stripe_nr, factor);
2519                 stripe_index *= map->sub_stripes;
2520
2521                 if (unplug_page || (rw & (1 << BIO_RW)))
2522                         num_stripes = map->sub_stripes;
2523                 else if (mirror_num)
2524                         stripe_index += mirror_num - 1;
2525                 else {
2526                         stripe_index = find_live_mirror(map, stripe_index,
2527                                               map->sub_stripes, stripe_index +
2528                                               current->pid % map->sub_stripes);
2529                 }
2530         } else {
2531                 /*
2532                  * after this do_div call, stripe_nr is the number of stripes
2533                  * on this device we have to walk to find the data, and
2534                  * stripe_index is the number of our device in the stripe array
2535                  */
2536                 stripe_index = do_div(stripe_nr, map->num_stripes);
2537         }
2538         BUG_ON(stripe_index >= map->num_stripes);
2539
2540         for (i = 0; i < num_stripes; i++) {
2541                 if (unplug_page) {
2542                         struct btrfs_device *device;
2543                         struct backing_dev_info *bdi;
2544
2545                         device = map->stripes[stripe_index].dev;
2546                         if (device->bdev) {
2547                                 bdi = blk_get_backing_dev_info(device->bdev);
2548                                 if (bdi->unplug_io_fn)
2549                                         bdi->unplug_io_fn(bdi, unplug_page);
2550                         }
2551                 } else {
2552                         multi->stripes[i].physical =
2553                                 map->stripes[stripe_index].physical +
2554                                 stripe_offset + stripe_nr * map->stripe_len;
2555                         multi->stripes[i].dev = map->stripes[stripe_index].dev;
2556                 }
2557                 stripe_index++;
2558         }
2559         if (multi_ret) {
2560                 *multi_ret = multi;
2561                 multi->num_stripes = num_stripes;
2562                 multi->max_errors = max_errors;
2563         }
2564 out:
2565         free_extent_map(em);
2566         return 0;
2567 }
2568
2569 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2570                       u64 logical, u64 *length,
2571                       struct btrfs_multi_bio **multi_ret, int mirror_num)
2572 {
2573         return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
2574                                  mirror_num, NULL);
2575 }
2576
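/*
 * Reverse mapping: given a physical offset on a device inside the chunk
 * at @chunk_start, compute every logical address that maps to it and
 * return the deduplicated list in @logical.
 */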
2577 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
2578                      u64 chunk_start, u64 physical, u64 devid,
2579                      u64 **logical, int *naddrs, int *stripe_len)
2580 {
2581         struct extent_map_tree *em_tree = &map_tree->map_tree;
2582         struct extent_map *em;
2583         struct map_lookup *map;
2584         u64 *buf;
2585         u64 bytenr;
2586         u64 length;
2587         u64 stripe_nr;
2588         int i, j, nr = 0;
2589
2590         spin_lock(&em_tree->lock);
2591         em = lookup_extent_mapping(em_tree, chunk_start, 1);
2592         spin_unlock(&em_tree->lock);
2593
2594         BUG_ON(!em || em->start != chunk_start);
2595         map = (struct map_lookup *)em->bdev;
2596
2597         length = em->len;
2598         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
2599                 do_div(length, map->num_stripes / map->sub_stripes);
2600         else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
2601                 do_div(length, map->num_stripes);
2602
2603         buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
2604         BUG_ON(!buf);
2605
2606         for (i = 0; i < map->num_stripes; i++) {
2607                 if (devid && map->stripes[i].dev->devid != devid)
2608                         continue;
2609                 if (map->stripes[i].physical > physical ||
2610                     map->stripes[i].physical + length <= physical)
2611                         continue;
2612
2613                 stripe_nr = physical - map->stripes[i].physical;
2614                 do_div(stripe_nr, map->stripe_len);
2615
2616                 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2617                         stripe_nr = stripe_nr * map->num_stripes + i;
2618                         do_div(stripe_nr, map->sub_stripes);
2619                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
2620                         stripe_nr = stripe_nr * map->num_stripes + i;
2621                 }
2622                 bytenr = chunk_start + stripe_nr * map->stripe_len;
2623                 WARN_ON(nr >= map->num_stripes);
2624                 for (j = 0; j < nr; j++) {
2625                         if (buf[j] == bytenr)
2626                                 break;
2627                 }
2628                 if (j == nr) {
2629                         WARN_ON(nr >= map->num_stripes);
2630                         buf[nr++] = bytenr;
2631                 }
2632         }
2633
2634         for (i = 0; i < nr; i++) {
2635                 struct btrfs_multi_bio *multi;
2636                 struct btrfs_bio_stripe *stripe;
2637                 int ret;
2638
2639                 length = 1;
2640                 ret = btrfs_map_block(map_tree, WRITE, buf[i],
2641                                       &length, &multi, 0);
2642                 BUG_ON(ret);
2643
2644                 stripe = multi->stripes;
2645                 for (j = 0; j < multi->num_stripes; j++) {
2646                         if (stripe[j].physical >= physical &&
2647                             physical < stripe[j].physical + length)
2648                                 break;
2649                 }
2650                 BUG_ON(j >= multi->num_stripes);
2651                 kfree(multi);
2652         }
2653
2654         *logical = buf;
2655         *naddrs = nr;
2656         *stripe_len = map->stripe_len;
2657
2658         free_extent_map(em);
2659         return 0;
2660 }
2661
2662 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
2663                       u64 logical, struct page *page)
2664 {
2665         u64 length = PAGE_CACHE_SIZE;
2666         return __btrfs_map_block(map_tree, READ, logical, &length,
2667                                  NULL, 0, page);
2668 }
2669
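/*
 * Completion handler for the cloned bios of a multi-stripe write.  Errors
 * are counted, and the original bio is completed only after every stripe
 * has finished; it is failed only when the error count exceeds max_errors.
 */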
2670 static void end_bio_multi_stripe(struct bio *bio, int err)
2671 {
2672         struct btrfs_multi_bio *multi = bio->bi_private;
2673         int is_orig_bio = 0;
2674
2675         if (err)
2676                 atomic_inc(&multi->error);
2677
2678         if (bio == multi->orig_bio)
2679                 is_orig_bio = 1;
2680
2681         if (atomic_dec_and_test(&multi->stripes_pending)) {
2682                 if (!is_orig_bio) {
2683                         bio_put(bio);
2684                         bio = multi->orig_bio;
2685                 }
2686                 bio->bi_private = multi->private;
2687                 bio->bi_end_io = multi->end_io;
2688                 /* only send an error to the higher layers if it is
2689                  * beyond the tolerance of the multi-bio
2690                  */
2691                 if (atomic_read(&multi->error) > multi->max_errors) {
2692                         err = -EIO;
2693                 } else if (err) {
2694                         /*
2695                          * this bio is actually up to date, we didn't
2696                          * go over the max number of errors
2697                          */
2698                         set_bit(BIO_UPTODATE, &bio->bi_flags);
2699                         err = 0;
2700                 }
2701                 kfree(multi);
2702
2703                 bio_endio(bio, err);
2704         } else if (!is_orig_bio) {
2705                 bio_put(bio);
2706         }
2707 }
2708
2709 struct async_sched {
2710         struct bio *bio;
2711         int rw;
2712         struct btrfs_fs_info *info;
2713         struct btrfs_work work;
2714 };
2715
2716 /*
2717  * see run_scheduled_bios for a description of why bios are collected for
2718  * async submit.
2719  *
2720  * This will add one bio to the pending list for a device and make sure
2721  * the work struct is scheduled.
2722  */
2723 static noinline int schedule_bio(struct btrfs_root *root,
2724                                  struct btrfs_device *device,
2725                                  int rw, struct bio *bio)
2726 {
2727         int should_queue = 1;
2728
2729         /* don't bother with additional async steps for reads, right now */
2730         if (!(rw & (1 << BIO_RW))) {
2731                 bio_get(bio);
2732                 submit_bio(rw, bio);
2733                 bio_put(bio);
2734                 return 0;
2735         }
2736
2737         /*
2738          * nr_async_bios allows us to reliably return congestion to the
2739          * higher layers.  Otherwise, the async bio makes it appear we have
2740          * made progress against dirty pages when we've really just put it
2741          * on a queue for later
2742          */
2743         atomic_inc(&root->fs_info->nr_async_bios);
2744         WARN_ON(bio->bi_next);
2745         bio->bi_next = NULL;
2746         bio->bi_rw |= rw;
2747
2748         spin_lock(&device->io_lock);
2749
2750         if (device->pending_bio_tail)
2751                 device->pending_bio_tail->bi_next = bio;
2752
2753         device->pending_bio_tail = bio;
2754         if (!device->pending_bios)
2755                 device->pending_bios = bio;
2756         if (device->running_pending)
2757                 should_queue = 0;
2758
2759         spin_unlock(&device->io_lock);
2760
2761         if (should_queue)
2762                 btrfs_queue_worker(&root->fs_info->submit_workers,
2763                                    &device->work);
2764         return 0;
2765 }
2766
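/*
 * Map an incoming bio onto its stripes and submit one clone per stripe,
 * either directly or through the per-device async submit queue.  Stripes
 * on missing devices get an immediate -EIO completion.
 */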
2767 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2768                   int mirror_num, int async_submit)
2769 {
2770         struct btrfs_mapping_tree *map_tree;
2771         struct btrfs_device *dev;
2772         struct bio *first_bio = bio;
2773         u64 logical = (u64)bio->bi_sector << 9;
2774         u64 length = 0;
2775         u64 map_length;
2776         struct btrfs_multi_bio *multi = NULL;
2777         int ret;
2778         int dev_nr = 0;
2779         int total_devs = 1;
2780
2781         length = bio->bi_size;
2782         map_tree = &root->fs_info->mapping_tree;
2783         map_length = length;
2784
2785         ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
2786                               mirror_num);
2787         BUG_ON(ret);
2788
2789         total_devs = multi->num_stripes;
2790         if (map_length < length) {
2791                 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
2792                        "len %llu\n", (unsigned long long)logical,
2793                        (unsigned long long)length,
2794                        (unsigned long long)map_length);
2795                 BUG();
2796         }
2797         multi->end_io = first_bio->bi_end_io;
2798         multi->private = first_bio->bi_private;
2799         multi->orig_bio = first_bio;
2800         atomic_set(&multi->stripes_pending, multi->num_stripes);
2801
2802         while (dev_nr < total_devs) {
2803                 if (total_devs > 1) {
2804                         if (dev_nr < total_devs - 1) {
2805                                 bio = bio_clone(first_bio, GFP_NOFS);
2806                                 BUG_ON(!bio);
2807                         } else {
2808                                 bio = first_bio;
2809                         }
2810                         bio->bi_private = multi;
2811                         bio->bi_end_io = end_bio_multi_stripe;
2812                 }
2813                 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
2814                 dev = multi->stripes[dev_nr].dev;
2815                 BUG_ON(rw == WRITE && !dev->writeable);
2816                 if (dev && dev->bdev) {
2817                         bio->bi_bdev = dev->bdev;
2818                         if (async_submit)
2819                                 schedule_bio(root, dev, rw, bio);
2820                         else
2821                                 submit_bio(rw, bio);
2822                 } else {
2823                         bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
2824                         bio->bi_sector = logical >> 9;
2825                         bio_endio(bio, -EIO);
2826                 }
2827                 dev_nr++;
2828         }
2829         if (total_devs == 1)
2830                 kfree(multi);
2831         return 0;
2832 }
2833
2834 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
2835                                        u8 *uuid, u8 *fsid)
2836 {
2837         struct btrfs_device *device;
2838         struct btrfs_fs_devices *cur_devices;
2839
2840         cur_devices = root->fs_info->fs_devices;
2841         while (cur_devices) {
2842                 if (!fsid ||
2843                     !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
2844                         device = __find_device(&cur_devices->devices,
2845                                                devid, uuid);
2846                         if (device)
2847                                 return device;
2848                 }
2849                 cur_devices = cur_devices->seed;
2850         }
2851         return NULL;
2852 }
2853
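/*
 * Create a placeholder btrfs_device for a devid referenced by the metadata
 * but not present at mount time (only reached when mounted with -o degraded).
 */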
2854 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
2855                                             u64 devid, u8 *dev_uuid)
2856 {
2857         struct btrfs_device *device;
2858         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2859
2860         device = kzalloc(sizeof(*device), GFP_NOFS);
2861         if (!device)
2862                 return NULL;
2863         list_add(&device->dev_list,
2864                  &fs_devices->devices);
2865         device->barriers = 1;
2866         device->dev_root = root->fs_info->dev_root;
2867         device->devid = devid;
2868         device->work.func = pending_bios_fn;
2869         device->fs_devices = fs_devices;
2870         fs_devices->num_devices++;
2871         spin_lock_init(&device->io_lock);
2872         INIT_LIST_HEAD(&device->dev_alloc_list);
2873         memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
2874         return device;
2875 }
2876
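/*
 * Turn a chunk item from the chunk tree into an in-memory map_lookup and
 * add it to the mapping tree, resolving each stripe's devid/uuid to a
 * btrfs_device (or to a placeholder when mounted degraded).
 */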
2877 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
2878                           struct extent_buffer *leaf,
2879                           struct btrfs_chunk *chunk)
2880 {
2881         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2882         struct map_lookup *map;
2883         struct extent_map *em;
2884         u64 logical;
2885         u64 length;
2886         u64 devid;
2887         u8 uuid[BTRFS_UUID_SIZE];
2888         int num_stripes;
2889         int ret;
2890         int i;
2891
2892         logical = key->offset;
2893         length = btrfs_chunk_length(leaf, chunk);
2894
2895         spin_lock(&map_tree->map_tree.lock);
2896         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
2897         spin_unlock(&map_tree->map_tree.lock);
2898
2899         /* already mapped? */
2900         if (em && em->start <= logical && em->start + em->len > logical) {
2901                 free_extent_map(em);
2902                 return 0;
2903         } else if (em) {
2904                 free_extent_map(em);
2905         }
2906
2911         em = alloc_extent_map(GFP_NOFS);
2912         if (!em)
2913                 return -ENOMEM;
2914         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2915         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2916         if (!map) {
2917                 free_extent_map(em);
2918                 return -ENOMEM;
2919         }
2920
2921         em->bdev = (struct block_device *)map;
2922         em->start = logical;
2923         em->len = length;
2924         em->block_start = 0;
2925         em->block_len = em->len;
2926
2927         map->num_stripes = num_stripes;
2928         map->io_width = btrfs_chunk_io_width(leaf, chunk);
2929         map->io_align = btrfs_chunk_io_align(leaf, chunk);
2930         map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
2931         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
2932         map->type = btrfs_chunk_type(leaf, chunk);
2933         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
2934         for (i = 0; i < num_stripes; i++) {
2935                 map->stripes[i].physical =
2936                         btrfs_stripe_offset_nr(leaf, chunk, i);
2937                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
2938                 read_extent_buffer(leaf, uuid, (unsigned long)
2939                                    btrfs_stripe_dev_uuid_nr(chunk, i),
2940                                    BTRFS_UUID_SIZE);
2941                 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
2942                                                         NULL);
2943                 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
2944                         kfree(map);
2945                         free_extent_map(em);
2946                         return -EIO;
2947                 }
2948                 if (!map->stripes[i].dev) {
2949                         map->stripes[i].dev =
2950                                 add_missing_dev(root, devid, uuid);
2951                         if (!map->stripes[i].dev) {
2952                                 kfree(map);
2953                                 free_extent_map(em);
2954                                 return -EIO;
2955                         }
2956                 }
2957                 map->stripes[i].dev->in_fs_metadata = 1;
2958         }
2959
2960         spin_lock(&map_tree->map_tree.lock);
2961         ret = add_extent_mapping(&map_tree->map_tree, em);
2962         spin_unlock(&map_tree->map_tree.lock);
2963         BUG_ON(ret);
2964         free_extent_map(em);
2965
2966         return 0;
2967 }
2968
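/*
 * copy the fields of an on-disk device item into an in-memory
 * btrfs_device structure
 */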
2969 static int fill_device_from_item(struct extent_buffer *leaf,
2970                                  struct btrfs_dev_item *dev_item,
2971                                  struct btrfs_device *device)
2972 {
2973         unsigned long ptr;
2974
2975         device->devid = btrfs_device_id(leaf, dev_item);
2976         device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2977         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2978         device->type = btrfs_device_type(leaf, dev_item);
2979         device->io_align = btrfs_device_io_align(leaf, dev_item);
2980         device->io_width = btrfs_device_io_width(leaf, dev_item);
2981         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
2982
2983         ptr = (unsigned long)btrfs_device_uuid(dev_item);
2984         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
2985
2986         return 0;
2987 }
2988
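/*
 * make sure the devices of the seed filesystem identified by fsid are
 * open and chained onto fs_info->fs_devices->seed.  Returns -ENOENT if
 * the seed devices have not been scanned yet, and -EINVAL if the
 * filesystem found is not actually a seeding filesystem.
 */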
2989 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
2990 {
2991         struct btrfs_fs_devices *fs_devices;
2992         int ret;
2993
2994         mutex_lock(&uuid_mutex);
2995
2996         fs_devices = root->fs_info->fs_devices->seed;
2997         while (fs_devices) {
2998                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
2999                         ret = 0;
3000                         goto out;
3001                 }
3002                 fs_devices = fs_devices->seed;
3003         }
3004
3005         fs_devices = find_fsid(fsid);
3006         if (!fs_devices) {
3007                 ret = -ENOENT;
3008                 goto out;
3009         }
3010
3011         fs_devices = clone_fs_devices(fs_devices);
3012         if (IS_ERR(fs_devices)) {
3013                 ret = PTR_ERR(fs_devices);
3014                 goto out;
3015         }
3016
3017         ret = __btrfs_open_devices(fs_devices, FMODE_READ,
3018                                    root->fs_info->bdev_holder);
3019         if (ret)
3020                 goto out;
3021
3022         if (!fs_devices->seeding) {
3023                 __btrfs_close_devices(fs_devices);
3024                 free_fs_devices(fs_devices);
3025                 ret = -EINVAL;
3026                 goto out;
3027         }
3028
3029         fs_devices->seed = root->fs_info->fs_devices->seed;
3030         root->fs_info->fs_devices->seed = fs_devices;
3031 out:
3032         mutex_unlock(&uuid_mutex);
3033         return ret;
3034 }
3035
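/*
 * process a single device item from the chunk tree.  If the item belongs
 * to a seed filesystem the seed devices are opened first; missing devices
 * are either faked up with add_missing_dev (degraded mounts) or treated
 * as an error.
 */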
3036 static int read_one_dev(struct btrfs_root *root,
3037                         struct extent_buffer *leaf,
3038                         struct btrfs_dev_item *dev_item)
3039 {
3040         struct btrfs_device *device;
3041         u64 devid;
3042         int ret;
3043         u8 fs_uuid[BTRFS_UUID_SIZE];
3044         u8 dev_uuid[BTRFS_UUID_SIZE];
3045
3046         devid = btrfs_device_id(leaf, dev_item);
3047         read_extent_buffer(leaf, dev_uuid,
3048                            (unsigned long)btrfs_device_uuid(dev_item),
3049                            BTRFS_UUID_SIZE);
3050         read_extent_buffer(leaf, fs_uuid,
3051                            (unsigned long)btrfs_device_fsid(dev_item),
3052                            BTRFS_UUID_SIZE);
3053
3054         if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
3055                 ret = open_seed_devices(root, fs_uuid);
3056                 if (ret && !btrfs_test_opt(root, DEGRADED))
3057                         return ret;
3058         }
3059
3060         device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
3061         if (!device || !device->bdev) {
3062                 if (!btrfs_test_opt(root, DEGRADED))
3063                         return -EIO;
3064
3065                 if (!device) {
3066                         printk(KERN_WARNING "warning: devid %llu missing\n",
3067                                (unsigned long long)devid);
3068                         device = add_missing_dev(root, devid, dev_uuid);
3069                         if (!device)
3070                                 return -ENOMEM;
3071                 }
3072         }
3073
3074         if (device->fs_devices != root->fs_info->fs_devices) {
3075                 BUG_ON(device->writeable);
3076                 if (device->generation !=
3077                     btrfs_device_generation(leaf, dev_item))
3078                         return -EINVAL;
3079         }
3080
3081         fill_device_from_item(leaf, dev_item, device);
3082         device->dev_root = root->fs_info->dev_root;
3083         device->in_fs_metadata = 1;
3084         if (device->writeable)
3085                 device->fs_devices->total_rw_bytes += device->total_bytes;
3086         ret = 0;
3087         return ret;
3088 }
3089
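/*
 * read the device item embedded in the super block
 */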
3090 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
3091 {
3092         struct btrfs_dev_item *dev_item;
3093
3094         dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
3095                                                      dev_item);
3096         return read_one_dev(root, buf, dev_item);
3097 }
3098
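/*
 * walk the chunk items packed into the super block's sys_chunk_array and
 * map each one, so the system chunks can be used to read the chunk tree
 * itself
 */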
3099 int btrfs_read_sys_array(struct btrfs_root *root)
3100 {
3101         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
3102         struct extent_buffer *sb;
3103         struct btrfs_disk_key *disk_key;
3104         struct btrfs_chunk *chunk;
3105         u8 *ptr;
3106         unsigned long sb_ptr;
3107         int ret = 0;
3108         u32 num_stripes;
3109         u32 array_size;
3110         u32 len = 0;
3111         u32 cur;
3112         struct btrfs_key key;
3113
3114         sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
3115                                           BTRFS_SUPER_INFO_SIZE);
3116         if (!sb)
3117                 return -ENOMEM;
3118         btrfs_set_buffer_uptodate(sb);
3119         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
3120         array_size = btrfs_super_sys_array_size(super_copy);
3121
3122         ptr = super_copy->sys_chunk_array;
3123         sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
3124         cur = 0;
3125
3126         while (cur < array_size) {
3127                 disk_key = (struct btrfs_disk_key *)ptr;
3128                 btrfs_disk_key_to_cpu(&key, disk_key);
3129
3130                 len = sizeof(*disk_key); ptr += len;
3131                 sb_ptr += len;
3132                 cur += len;
3133
3134                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
3135                         chunk = (struct btrfs_chunk *)sb_ptr;
3136                         ret = read_one_chunk(root, &key, sb, chunk);
3137                         if (ret)
3138                                 break;
3139                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
3140                         len = btrfs_chunk_item_size(num_stripes);
3141                 } else {
3142                         ret = -EIO;
3143                         break;
3144                 }
3145                 ptr += len;
3146                 sb_ptr += len;
3147                 cur += len;
3148         }
3149         free_extent_buffer(sb);
3150         return ret;
3151 }
3152
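/*
 * read every device item and chunk item out of the chunk tree and build
 * the in-memory device list and chunk mappings.  Device items are read
 * in a first pass so that the chunk mappings can reference them.
 */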
3153 int btrfs_read_chunk_tree(struct btrfs_root *root)
3154 {
3155         struct btrfs_path *path;
3156         struct extent_buffer *leaf;
3157         struct btrfs_key key;
3158         struct btrfs_key found_key;
3159         int ret;
3160         int slot;
3161
3162         root = root->fs_info->chunk_root;
3163
3164         path = btrfs_alloc_path();
3165         if (!path)
3166                 return -ENOMEM;
3167
3168         /* first we search for all of the device items, and then we
3169          * read in all of the chunk items.  This way we can create chunk
3170          * mappings that reference all of the devices that are found
3171          */
3172         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
3173         key.offset = 0;
3174         key.type = 0;
3175 again:
3176         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3177         while (1) {
3178                 leaf = path->nodes[0];
3179                 slot = path->slots[0];
3180                 if (slot >= btrfs_header_nritems(leaf)) {
3181                         ret = btrfs_next_leaf(root, path);
3182                         if (ret == 0)
3183                                 continue;
3184                         if (ret < 0)
3185                                 goto error;
3186                         break;
3187                 }
3188                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3189                 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3190                         if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
3191                                 break;
3192                         if (found_key.type == BTRFS_DEV_ITEM_KEY) {
3193                                 struct btrfs_dev_item *dev_item;
3194                                 dev_item = btrfs_item_ptr(leaf, slot,
3195                                                   struct btrfs_dev_item);
3196                                 ret = read_one_dev(root, leaf, dev_item);
3197                                 if (ret)
3198                                         goto error;
3199                         }
3200                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
3201                         struct btrfs_chunk *chunk;
3202                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3203                         ret = read_one_chunk(root, &found_key, leaf, chunk);
3204                         if (ret)
3205                                 goto error;
3206                 }
3207                 path->slots[0]++;
3208         }
3209         if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
3210                 key.objectid = 0;
3211                 btrfs_release_path(root, path);
3212                 goto again;
3213         }
3214         ret = 0;
3215 error:
3216         btrfs_free_path(path);
3217         return ret;
3218 }