Btrfs: remove #if 0 code
fs/btrfs/disk-io.c (linux-2.6)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/blkdev.h>
21 #include <linux/scatterlist.h>
22 #include <linux/swap.h>
23 #include <linux/radix-tree.h>
24 #include <linux/writeback.h>
25 #include <linux/buffer_head.h>
26 #include <linux/workqueue.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include "compat.h"
30 #include "crc32c.h"
31 #include "ctree.h"
32 #include "disk-io.h"
33 #include "transaction.h"
34 #include "btrfs_inode.h"
35 #include "volumes.h"
36 #include "print-tree.h"
37 #include "async-thread.h"
38 #include "locking.h"
39 #include "ref-cache.h"
40 #include "tree-log.h"
41 #include "free-space-cache.h"
42
43 static struct extent_io_ops btree_extent_io_ops;
44 static void end_workqueue_fn(struct btrfs_work *work);
45
46 /*
47  * end_io_wq structs are used to do processing in task context when an IO is
48  * complete.  This is used during reads to verify checksums, and it is used
49  * by writes to insert metadata for new file extents after IO is complete.
50  */
51 struct end_io_wq {
52         struct bio *bio;
53         bio_end_io_t *end_io;
54         void *private;
55         struct btrfs_fs_info *info;
56         int error;
57         int metadata;
58         struct list_head list;
59         struct btrfs_work work;
60 };
61
62 /*
63  * async submit bios are used to offload expensive checksumming
64  * onto the worker threads.  They checksum file and metadata bios
65  * just before they are sent down the IO stack.
66  */
67 struct async_submit_bio {
68         struct inode *inode;
69         struct bio *bio;
70         struct list_head list;
71         extent_submit_bio_hook_t *submit_bio_start;
72         extent_submit_bio_hook_t *submit_bio_done;
73         int rw;
74         int mirror_num;
75         unsigned long bio_flags;
76         struct btrfs_work work;
77 };
78
79 /* These are used to set the lockdep class on the extent buffer locks.
80  * The class is set by the readpage_end_io_hook after the buffer has
81  * passed csum validation but before the pages are unlocked.
82  *
83  * The lockdep class is also set by btrfs_init_new_buffer on freshly
84  * allocated blocks.
85  *
86  * The class is based on the level in the tree block, which allows lockdep
87  * to know that lower nodes nest inside the locks of higher nodes.
88  *
89  * We also add a check to make sure the highest level of the tree is
90  * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
91  * code needs update as well.
92  */
93 #ifdef CONFIG_DEBUG_LOCK_ALLOC
94 # if BTRFS_MAX_LEVEL != 8
95 #  error "BTRFS_MAX_LEVEL changed: update btrfs_eb_class and btrfs_eb_name"
96 # endif
97 static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
98 static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
99         /* leaf */
100         "btrfs-extent-00",
101         "btrfs-extent-01",
102         "btrfs-extent-02",
103         "btrfs-extent-03",
104         "btrfs-extent-04",
105         "btrfs-extent-05",
106         "btrfs-extent-06",
107         "btrfs-extent-07",
108         /* highest possible level */
109         "btrfs-extent-08",
110 };
111 #endif
112
113 /*
114  * extents on the btree inode are pretty simple: there's a single
115  * extent that covers the entire device
116  */
117 static struct extent_map *btree_get_extent(struct inode *inode,
118                 struct page *page, size_t page_offset, u64 start, u64 len,
119                 int create)
120 {
121         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
122         struct extent_map *em;
123         int ret;
124
125         spin_lock(&em_tree->lock);
126         em = lookup_extent_mapping(em_tree, start, len);
127         if (em) {
128                 em->bdev =
129                         BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
130                 spin_unlock(&em_tree->lock);
131                 goto out;
132         }
133         spin_unlock(&em_tree->lock);
134
135         em = alloc_extent_map(GFP_NOFS);
136         if (!em) {
137                 em = ERR_PTR(-ENOMEM);
138                 goto out;
139         }
140         em->start = 0;
141         em->len = (u64)-1;
142         em->block_len = (u64)-1;
143         em->block_start = 0;
144         em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
145
146         spin_lock(&em_tree->lock);
147         ret = add_extent_mapping(em_tree, em);
148         if (ret == -EEXIST) {
149                 u64 failed_start = em->start;
150                 u64 failed_len = em->len;
151
152                 free_extent_map(em);
153                 em = lookup_extent_mapping(em_tree, start, len);
154                 if (em) {
155                         ret = 0;
156                 } else {
157                         em = lookup_extent_mapping(em_tree, failed_start,
158                                                    failed_len);
159                         ret = -EIO;
160                 }
161         } else if (ret) {
162                 free_extent_map(em);
163                 em = NULL;
164         }
165         spin_unlock(&em_tree->lock);
166
167         if (ret)
168                 em = ERR_PTR(ret);
169 out:
170         return em;
171 }
172
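/*
 * checksums are crc32c over the block contents, seeded so large blocks
 * can be summed in pieces; btrfs_csum_final inverts the crc and stores
 * it in little endian order, which is the on-disk format
 */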
173 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
174 {
175         return btrfs_crc32c(seed, data, len);
176 }
177
178 void btrfs_csum_final(u32 crc, char *result)
179 {
180         *(__le32 *)result = ~cpu_to_le32(crc);
181 }
182
183 /*
184  * compute the csum for a btree block, and either verify it or write it
185  * into the csum field of the block.
186  */
187 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
188                            int verify)
189 {
190         u16 csum_size =
191                 btrfs_super_csum_size(&root->fs_info->super_copy);
192         char *result = NULL;
193         unsigned long len;
194         unsigned long cur_len;
195         unsigned long offset = BTRFS_CSUM_SIZE;
196         char *map_token = NULL;
197         char *kaddr;
198         unsigned long map_start;
199         unsigned long map_len;
200         int err;
201         u32 crc = ~(u32)0;
202         unsigned long inline_result;
203
204         len = buf->len - offset;
205         while (len > 0) {
206                 err = map_private_extent_buffer(buf, offset, 32,
207                                         &map_token, &kaddr,
208                                         &map_start, &map_len, KM_USER0);
209                 if (err)
210                         return 1;
211                 cur_len = min(len, map_len - (offset - map_start));
212                 crc = btrfs_csum_data(root, kaddr + offset - map_start,
213                                       crc, cur_len);
214                 len -= cur_len;
215                 offset += cur_len;
216                 unmap_extent_buffer(buf, map_token, KM_USER0);
217         }
218         if (csum_size > sizeof(inline_result)) {
219                 result = kzalloc(csum_size, GFP_NOFS);
220                 if (!result)
221                         return 1;
222         } else {
223                 result = (char *)&inline_result;
224         }
225
226         btrfs_csum_final(crc, result);
227
228         if (verify) {
229                 if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
230                         u32 val;
231                         u32 found = 0;
232                         memcpy(&found, result, csum_size);
233
234                         read_extent_buffer(buf, &val, 0, csum_size);
235                         printk(KERN_INFO "btrfs: %s checksum verify failed "
236                                "on %llu wanted %X found %X level %d\n",
237                                root->fs_info->sb->s_id,
238                        (unsigned long long)buf->start, val, found, btrfs_header_level(buf));
239                         if (result != (char *)&inline_result)
240                                 kfree(result);
241                         return 1;
242                 }
243         } else {
244                 write_extent_buffer(buf, result, 0, csum_size);
245         }
246         if (result != (char *)&inline_result)
247                 kfree(result);
248         return 0;
249 }
250
251 /*
252  * we can't consider a given block up to date unless the transid of the
253  * block matches the transid in the parent node's pointer.  This is how we
254  * detect blocks that either didn't get written at all or got written
255  * in the wrong place.
256  */
257 static int verify_parent_transid(struct extent_io_tree *io_tree,
258                                  struct extent_buffer *eb, u64 parent_transid)
259 {
260         int ret;
261
262         if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
263                 return 0;
264
265         lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
266         if (extent_buffer_uptodate(io_tree, eb) &&
267             btrfs_header_generation(eb) == parent_transid) {
268                 ret = 0;
269                 goto out;
270         }
271         printk(KERN_ERR "parent transid verify failed on %llu wanted %llu found %llu\n",
272                (unsigned long long)eb->start,
273                (unsigned long long)parent_transid,
274                (unsigned long long)btrfs_header_generation(eb));
275         ret = 1;
276         clear_extent_buffer_uptodate(io_tree, eb);
277 out:
278         unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
279                       GFP_NOFS);
280         return ret;
281 }
282
283 /*
284  * helper to read a given tree block, doing retries as required when
285  * the checksums don't match and we have alternate mirrors to try.
286  */
287 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
288                                           struct extent_buffer *eb,
289                                           u64 start, u64 parent_transid)
290 {
291         struct extent_io_tree *io_tree;
292         int ret;
293         int num_copies = 0;
294         int mirror_num = 0;
295
296         io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
297         while (1) {
298                 ret = read_extent_buffer_pages(io_tree, eb, start, 1,
299                                                btree_get_extent, mirror_num);
300                 if (!ret &&
301                     !verify_parent_transid(io_tree, eb, parent_transid))
302                         return ret;
303
304                 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
305                                               eb->start, eb->len);
306                 if (num_copies == 1)
307                         return ret;
308
309                 mirror_num++;
310                 if (mirror_num > num_copies)
311                         return ret;
312         }
313         return -EIO;
314 }
315
316 /*
317  * checksum a dirty tree block before IO.  This has extra checks to make sure
318  * we only fill in the checksum field in the first page of a multi-page block
319  */
320
321 static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
322 {
323         struct extent_io_tree *tree;
324         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
325         u64 found_start;
326         int found_level;
327         unsigned long len;
328         struct extent_buffer *eb;
329         int ret;
330
331         tree = &BTRFS_I(page->mapping->host)->io_tree;
332
333         if (page->private == EXTENT_PAGE_PRIVATE)
334                 goto out;
335         if (!page->private)
336                 goto out;
337         len = page->private >> 2;
338         WARN_ON(len == 0);
339
340         eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
341         ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
342                                              btrfs_header_generation(eb));
343         BUG_ON(ret);
344         found_start = btrfs_header_bytenr(eb);
345         if (found_start != start) {
346                 WARN_ON(1);
347                 goto err;
348         }
349         if (eb->first_page != page) {
350                 WARN_ON(1);
351                 goto err;
352         }
353         if (!PageUptodate(page)) {
354                 WARN_ON(1);
355                 goto err;
356         }
357         found_level = btrfs_header_level(eb);
358
359         csum_tree_block(root, eb, 0);
360 err:
361         free_extent_buffer(eb);
362 out:
363         return 0;
364 }
365
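/*
 * check that the fsid in a tree block header belongs to this
 * filesystem.  Blocks that came from a seed device keep the seed's
 * fsid, so we walk the fs_devices->seed chain and accept any match.
 * Returns 0 when the fsid is recognized and 1 otherwise.
 */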
366 static int check_tree_block_fsid(struct btrfs_root *root,
367                                  struct extent_buffer *eb)
368 {
369         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
370         u8 fsid[BTRFS_UUID_SIZE];
371         int ret = 1;
372
373         read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
374                            BTRFS_FSID_SIZE);
375         while (fs_devices) {
376                 if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
377                         ret = 0;
378                         break;
379                 }
380                 fs_devices = fs_devices->seed;
381         }
382         return ret;
383 }
384
385 #ifdef CONFIG_DEBUG_LOCK_ALLOC
386 void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
387 {
388         lockdep_set_class_and_name(&eb->lock,
389                            &btrfs_eb_class[level],
390                            btrfs_eb_name[level]);
391 }
392 #endif
393
394 static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
395                                struct extent_state *state)
396 {
397         struct extent_io_tree *tree;
398         u64 found_start;
399         int found_level;
400         unsigned long len;
401         struct extent_buffer *eb;
402         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
403         int ret = 0;
404
405         tree = &BTRFS_I(page->mapping->host)->io_tree;
406         if (page->private == EXTENT_PAGE_PRIVATE)
407                 goto out;
408         if (!page->private)
409                 goto out;
410
411         len = page->private >> 2;
412         WARN_ON(len == 0);
413
414         eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
415
416         found_start = btrfs_header_bytenr(eb);
417         if (found_start != start) {
418                 printk(KERN_INFO "btrfs bad tree block start %llu %llu\n",
419                        (unsigned long long)found_start,
420                        (unsigned long long)eb->start);
421                 ret = -EIO;
422                 goto err;
423         }
424         if (eb->first_page != page) {
425                 printk(KERN_INFO "btrfs bad first page %lu %lu\n",
426                        eb->first_page->index, page->index);
427                 WARN_ON(1);
428                 ret = -EIO;
429                 goto err;
430         }
431         if (check_tree_block_fsid(root, eb)) {
432                 printk(KERN_INFO "btrfs bad fsid on block %llu\n",
433                        (unsigned long long)eb->start);
434                 ret = -EIO;
435                 goto err;
436         }
437         found_level = btrfs_header_level(eb);
438
439         btrfs_set_buffer_lockdep_class(eb, found_level);
440
441         ret = csum_tree_block(root, eb, 1);
442         if (ret)
443                 ret = -EIO;
444
445         end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
446         end = eb->start + end - 1;
447 err:
448         free_extent_buffer(eb);
449 out:
450         return ret;
451 }
452
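/*
 * bio end_io routine that runs at interrupt time.  It stashes the
 * error and bounces the rest of the processing to the helper threads,
 * picking the worker pool based on read vs write and whether the bio
 * carried metadata.
 */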
453 static void end_workqueue_bio(struct bio *bio, int err)
454 {
455         struct end_io_wq *end_io_wq = bio->bi_private;
456         struct btrfs_fs_info *fs_info;
457
458         fs_info = end_io_wq->info;
459         end_io_wq->error = err;
460         end_io_wq->work.func = end_workqueue_fn;
461         end_io_wq->work.flags = 0;
462
463         if (bio->bi_rw & (1 << BIO_RW)) {
464                 if (end_io_wq->metadata)
465                         btrfs_queue_worker(&fs_info->endio_meta_write_workers,
466                                            &end_io_wq->work);
467                 else
468                         btrfs_queue_worker(&fs_info->endio_write_workers,
469                                            &end_io_wq->work);
470         } else {
471                 if (end_io_wq->metadata)
472                         btrfs_queue_worker(&fs_info->endio_meta_workers,
473                                            &end_io_wq->work);
474                 else
475                         btrfs_queue_worker(&fs_info->endio_workers,
476                                            &end_io_wq->work);
477         }
478 }
479
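/*
 * arrange for a bio's completion to run in a helper thread: the
 * original bi_end_io and bi_private are saved in an end_io_wq struct
 * and the bio is pointed at end_workqueue_bio instead.
 */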
480 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
481                         int metadata)
482 {
483         struct end_io_wq *end_io_wq;
484         end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
485         if (!end_io_wq)
486                 return -ENOMEM;
487
488         end_io_wq->private = bio->bi_private;
489         end_io_wq->end_io = bio->bi_end_io;
490         end_io_wq->info = info;
491         end_io_wq->error = 0;
492         end_io_wq->bio = bio;
493         end_io_wq->metadata = metadata;
494
495         bio->bi_private = end_io_wq;
496         bio->bi_end_io = end_workqueue_bio;
497         return 0;
498 }
499
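/*
 * limit on the number of async bios allowed in flight before
 * submitters are throttled, scaled by the number of open devices so
 * larger arrays get deeper queues.
 */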
500 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
501 {
502         unsigned long limit = min_t(unsigned long,
503                                     info->workers.max_workers,
504                                     info->fs_devices->open_devices);
505         return 256 * limit;
506 }
507
508 int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
509 {
510         return atomic_read(&info->nr_async_bios) >
511                 btrfs_async_submit_limit(info);
512 }
513
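/*
 * async bio submission happens in two ordered phases.
 * run_one_async_start does the expensive checksumming in parallel
 * across the worker threads, while run_one_async_done runs in queueing
 * order so bios still reach the devices in the order they were
 * submitted; it also wakes up anyone throttled on the async submit
 * limit.
 */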
514 static void run_one_async_start(struct btrfs_work *work)
515 {
516         struct btrfs_fs_info *fs_info;
517         struct async_submit_bio *async;
518
519         async = container_of(work, struct async_submit_bio, work);
520         fs_info = BTRFS_I(async->inode)->root->fs_info;
521         async->submit_bio_start(async->inode, async->rw, async->bio,
522                                async->mirror_num, async->bio_flags);
523 }
524
525 static void run_one_async_done(struct btrfs_work *work)
526 {
527         struct btrfs_fs_info *fs_info;
528         struct async_submit_bio *async;
529         int limit;
530
531         async = container_of(work, struct async_submit_bio, work);
532         fs_info = BTRFS_I(async->inode)->root->fs_info;
533
534         limit = btrfs_async_submit_limit(fs_info);
535         limit = limit * 2 / 3;
536
537         atomic_dec(&fs_info->nr_async_submits);
538
539         if (atomic_read(&fs_info->nr_async_submits) < limit &&
540             waitqueue_active(&fs_info->async_submit_wait))
541                 wake_up(&fs_info->async_submit_wait);
542
543         async->submit_bio_done(async->inode, async->rw, async->bio,
544                                async->mirror_num, async->bio_flags);
545 }
546
547 static void run_one_async_free(struct btrfs_work *work)
548 {
549         struct async_submit_bio *async;
550
551         async = container_of(work, struct async_submit_bio, work);
552         kfree(async);
553 }
554
555 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
556                         int rw, struct bio *bio, int mirror_num,
557                         unsigned long bio_flags,
558                         extent_submit_bio_hook_t *submit_bio_start,
559                         extent_submit_bio_hook_t *submit_bio_done)
560 {
561         struct async_submit_bio *async;
562
563         async = kmalloc(sizeof(*async), GFP_NOFS);
564         if (!async)
565                 return -ENOMEM;
566
567         async->inode = inode;
568         async->rw = rw;
569         async->bio = bio;
570         async->mirror_num = mirror_num;
571         async->submit_bio_start = submit_bio_start;
572         async->submit_bio_done = submit_bio_done;
573
574         async->work.func = run_one_async_start;
575         async->work.ordered_func = run_one_async_done;
576         async->work.ordered_free = run_one_async_free;
577
578         async->work.flags = 0;
579         async->bio_flags = bio_flags;
580
581         atomic_inc(&fs_info->nr_async_submits);
582
583         if (rw & (1 << BIO_RW_SYNCIO))
584                 btrfs_set_work_high_prio(&async->work);
585
586         btrfs_queue_worker(&fs_info->workers, &async->work);
587
588         while (atomic_read(&fs_info->async_submit_draining) &&
589               atomic_read(&fs_info->nr_async_submits)) {
590                 wait_event(fs_info->async_submit_wait,
591                            (atomic_read(&fs_info->nr_async_submits) == 0));
592         }
593
594         return 0;
595 }
596
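/*
 * csum every tree block in a write bio before it goes down to disk.
 * Each bvec page belongs to the btree inode, so the root is found
 * through the page's mapping.
 */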
597 static int btree_csum_one_bio(struct bio *bio)
598 {
599         struct bio_vec *bvec = bio->bi_io_vec;
600         int bio_index = 0;
601         struct btrfs_root *root;
602
603         WARN_ON(bio->bi_vcnt <= 0);
604         while (bio_index < bio->bi_vcnt) {
605                 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
606                 csum_dirty_buffer(root, bvec->bv_page);
607                 bio_index++;
608                 bvec++;
609         }
610         return 0;
611 }
612
613 static int __btree_submit_bio_start(struct inode *inode, int rw,
614                                     struct bio *bio, int mirror_num,
615                                     unsigned long bio_flags)
616 {
617         /*
618          * for a write we're already in the async submission context,
619          * so just csum the bio here; __btree_submit_bio_done maps it later
620          */
621         btree_csum_one_bio(bio);
622         return 0;
623 }
624
625 static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
626                                  int mirror_num, unsigned long bio_flags)
627 {
628         /*
629          * when we're called for a write, we're already in the async
630          * submission context.  Just jump into btrfs_map_bio
631          */
632         return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
633 }
634
635 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
636                                  int mirror_num, unsigned long bio_flags)
637 {
638         int ret;
639
640         ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
641                                           bio, 1);
642         BUG_ON(ret);
643
644         if (!(rw & (1 << BIO_RW))) {
645                 /*
646                  * called for a read, do the setup so that checksum validation
647                  * can happen in the async kernel threads
648                  */
649                 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
650                                      mirror_num, 0);
651         }
652
653         /*
654          * kthread helpers are used to submit writes so that checksumming
655          * can happen in parallel across all CPUs
656          */
657         return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
658                                    inode, rw, bio, mirror_num, 0,
659                                    __btree_submit_bio_start,
660                                    __btree_submit_bio_done);
661 }
662
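/*
 * write out a single btree page.  When called from memory reclaim
 * (PF_MEMALLOC) we can't risk taking tree locks or allocating, so the
 * page is redirtied and accounted as dirty metadata instead of being
 * written.
 */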
663 static int btree_writepage(struct page *page, struct writeback_control *wbc)
664 {
665         struct extent_io_tree *tree;
666         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
667         struct extent_buffer *eb;
668         int was_dirty;
669
670         tree = &BTRFS_I(page->mapping->host)->io_tree;
671         if (!(current->flags & PF_MEMALLOC)) {
672                 return extent_write_full_page(tree, page,
673                                               btree_get_extent, wbc);
674         }
675
676         redirty_page_for_writepage(wbc, page);
677         eb = btrfs_find_tree_block(root, page_offset(page),
678                                       PAGE_CACHE_SIZE);
679         WARN_ON(!eb);
680
681         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
682         if (!was_dirty) {
683                 spin_lock(&root->fs_info->delalloc_lock);
684                 root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
685                 spin_unlock(&root->fs_info->delalloc_lock);
686         }
687         free_extent_buffer(eb);
688
689         unlock_page(page);
690         return 0;
691 }
692
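/*
 * for background (WB_SYNC_NONE) writeback, skip the flush until enough
 * metadata is dirty; writing small amounts of btree data is mostly
 * wasted seeks.
 */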
693 static int btree_writepages(struct address_space *mapping,
694                             struct writeback_control *wbc)
695 {
696         struct extent_io_tree *tree;
697         tree = &BTRFS_I(mapping->host)->io_tree;
698         if (wbc->sync_mode == WB_SYNC_NONE) {
699                 struct btrfs_root *root = BTRFS_I(mapping->host)->root;
700                 u64 num_dirty;
701                 unsigned long thresh = 32 * 1024 * 1024;
702
703                 if (wbc->for_kupdate)
704                         return 0;
705
706                 /* this is a bit racy, but that's ok */
707                 num_dirty = root->fs_info->dirty_metadata_bytes;
708                 if (num_dirty < thresh)
709                         return 0;
710         }
711         return extent_writepages(tree, mapping, btree_get_extent, wbc);
712 }
713
714 static int btree_readpage(struct file *file, struct page *page)
715 {
716         struct extent_io_tree *tree;
717         tree = &BTRFS_I(page->mapping->host)->io_tree;
718         return extent_read_full_page(tree, page, btree_get_extent);
719 }
720
721 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
722 {
723         struct extent_io_tree *tree;
724         struct extent_map_tree *map;
725         int ret;
726
727         if (PageWriteback(page) || PageDirty(page))
728                 return 0;
729
730         tree = &BTRFS_I(page->mapping->host)->io_tree;
731         map = &BTRFS_I(page->mapping->host)->extent_tree;
732
733         ret = try_release_extent_state(map, tree, page, gfp_flags);
734         if (!ret)
735                 return 0;
736
737         ret = try_release_extent_buffer(tree, page);
738         if (ret == 1) {
739                 ClearPagePrivate(page);
740                 set_page_private(page, 0);
741                 page_cache_release(page);
742         }
743
744         return ret;
745 }
746
747 static void btree_invalidatepage(struct page *page, unsigned long offset)
748 {
749         struct extent_io_tree *tree;
750         tree = &BTRFS_I(page->mapping->host)->io_tree;
751         extent_invalidatepage(tree, page, offset);
752         btree_releasepage(page, GFP_NOFS);
753         if (PagePrivate(page)) {
754                 printk(KERN_WARNING "btrfs warning page private not zero "
755                        "on page %llu\n", (unsigned long long)page_offset(page));
756                 ClearPagePrivate(page);
757                 set_page_private(page, 0);
758                 page_cache_release(page);
759         }
760 }
761
762 static struct address_space_operations btree_aops = {
763         .readpage       = btree_readpage,
764         .writepage      = btree_writepage,
765         .writepages     = btree_writepages,
766         .releasepage    = btree_releasepage,
767         .invalidatepage = btree_invalidatepage,
768         .sync_page      = block_sync_page,
769 };
770
771 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
772                          u64 parent_transid)
773 {
774         struct extent_buffer *buf = NULL;
775         struct inode *btree_inode = root->fs_info->btree_inode;
776         int ret = 0;
777
778         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
779         if (!buf)
780                 return 0;
781         read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
782                                  buf, 0, 0, btree_get_extent, 0);
783         free_extent_buffer(buf);
784         return ret;
785 }
786
787 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
788                                             u64 bytenr, u32 blocksize)
789 {
790         struct inode *btree_inode = root->fs_info->btree_inode;
791         struct extent_buffer *eb;
792         eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
793                                 bytenr, blocksize, GFP_NOFS);
794         return eb;
795 }
796
797 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
798                                                  u64 bytenr, u32 blocksize)
799 {
800         struct inode *btree_inode = root->fs_info->btree_inode;
801         struct extent_buffer *eb;
802
803         eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
804                                  bytenr, blocksize, NULL, GFP_NOFS);
805         return eb;
806 }
807
808
809 int btrfs_write_tree_block(struct extent_buffer *buf)
810 {
811         return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
812                                       buf->start + buf->len - 1, WB_SYNC_ALL);
813 }
814
815 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
816 {
817         return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
818                                   buf->start, buf->start + buf->len - 1);
819 }
820
821 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
822                                       u32 blocksize, u64 parent_transid)
823 {
824         struct extent_buffer *buf = NULL;
825         struct inode *btree_inode = root->fs_info->btree_inode;
826         struct extent_io_tree *io_tree;
827         int ret;
828
829         io_tree = &BTRFS_I(btree_inode)->io_tree;
830
831         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
832         if (!buf)
833                 return NULL;
834
835         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
836
837         if (ret == 0)
838                 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
839         else
840                 WARN_ON(1);
841         return buf;
842
843 }
844
845 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
846                      struct extent_buffer *buf)
847 {
848         struct inode *btree_inode = root->fs_info->btree_inode;
849         if (btrfs_header_generation(buf) ==
850             root->fs_info->running_transaction->transid) {
851                 btrfs_assert_tree_locked(buf);
852
853                 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
854                         spin_lock(&root->fs_info->delalloc_lock);
855                         if (root->fs_info->dirty_metadata_bytes >= buf->len)
856                                 root->fs_info->dirty_metadata_bytes -= buf->len;
857                         else
858                                 WARN_ON(1);
859                         spin_unlock(&root->fs_info->delalloc_lock);
860                 }
861
862                 /* ugh, clear_extent_buffer_dirty needs to lock the page */
863                 btrfs_set_lock_blocking(buf);
864                 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
865                                           buf);
866         }
867         return 0;
868 }
869
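/*
 * initialize the fields common to every btrfs_root: block size
 * parameters, locks, list heads and the per-root log tree state.  Used
 * both for roots read from disk and for roots created in memory.
 */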
870 static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
871                         u32 stripesize, struct btrfs_root *root,
872                         struct btrfs_fs_info *fs_info,
873                         u64 objectid)
874 {
875         root->node = NULL;
876         root->commit_root = NULL;
877         root->ref_tree = NULL;
878         root->sectorsize = sectorsize;
879         root->nodesize = nodesize;
880         root->leafsize = leafsize;
881         root->stripesize = stripesize;
882         root->ref_cows = 0;
883         root->track_dirty = 0;
884
885         root->fs_info = fs_info;
886         root->objectid = objectid;
887         root->last_trans = 0;
888         root->highest_inode = 0;
889         root->last_inode_alloc = 0;
890         root->name = NULL;
891         root->in_sysfs = 0;
892
893         INIT_LIST_HEAD(&root->dirty_list);
894         INIT_LIST_HEAD(&root->orphan_list);
895         INIT_LIST_HEAD(&root->dead_list);
896         spin_lock_init(&root->node_lock);
897         spin_lock_init(&root->list_lock);
898         mutex_init(&root->objectid_mutex);
899         mutex_init(&root->log_mutex);
900         init_waitqueue_head(&root->log_writer_wait);
901         init_waitqueue_head(&root->log_commit_wait[0]);
902         init_waitqueue_head(&root->log_commit_wait[1]);
903         atomic_set(&root->log_commit[0], 0);
904         atomic_set(&root->log_commit[1], 0);
905         atomic_set(&root->log_writers, 0);
906         root->log_batch = 0;
907         root->log_transid = 0;
908         extent_io_tree_init(&root->dirty_log_pages,
909                              fs_info->btree_inode->i_mapping, GFP_NOFS);
910
911         btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
912         root->ref_tree = &root->ref_tree_struct;
913
914         memset(&root->root_key, 0, sizeof(root->root_key));
915         memset(&root->root_item, 0, sizeof(root->root_item));
916         memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
917         memset(&root->root_kobj, 0, sizeof(root->root_kobj));
918         root->defrag_trans_start = fs_info->generation;
919         init_completion(&root->kobj_unregister);
920         root->defrag_running = 0;
921         root->defrag_level = 0;
922         root->root_key.objectid = objectid;
923         root->anon_super.s_root = NULL;
924         root->anon_super.s_dev = 0;
925         INIT_LIST_HEAD(&root->anon_super.s_list);
926         INIT_LIST_HEAD(&root->anon_super.s_instances);
927         init_rwsem(&root->anon_super.s_umount);
928
929         return 0;
930 }
931
932 static int find_and_setup_root(struct btrfs_root *tree_root,
933                                struct btrfs_fs_info *fs_info,
934                                u64 objectid,
935                                struct btrfs_root *root)
936 {
937         int ret;
938         u32 blocksize;
939         u64 generation;
940
941         __setup_root(tree_root->nodesize, tree_root->leafsize,
942                      tree_root->sectorsize, tree_root->stripesize,
943                      root, fs_info, objectid);
944         ret = btrfs_find_last_root(tree_root, objectid,
945                                    &root->root_item, &root->root_key);
946         BUG_ON(ret);
947
948         generation = btrfs_root_generation(&root->root_item);
949         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
950         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
951                                      blocksize, generation);
952         BUG_ON(!root->node);
953         return 0;
954 }
955
956 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
957                              struct btrfs_fs_info *fs_info)
958 {
959         struct extent_buffer *eb;
960         struct btrfs_root *log_root_tree = fs_info->log_root_tree;
961         u64 start = 0;
962         u64 end = 0;
963         int ret;
964
965         if (!log_root_tree)
966                 return 0;
967
968         while (1) {
969                 ret = find_first_extent_bit(&log_root_tree->dirty_log_pages,
970                                     0, &start, &end, EXTENT_DIRTY);
971                 if (ret)
972                         break;
973
974                 clear_extent_dirty(&log_root_tree->dirty_log_pages,
975                                    start, end, GFP_NOFS);
976         }
977         eb = fs_info->log_root_tree->node;
978
979         WARN_ON(btrfs_header_level(eb) != 0);
980         WARN_ON(btrfs_header_nritems(eb) != 0);
981
982         ret = btrfs_free_reserved_extent(fs_info->tree_root,
983                                 eb->start, eb->len);
984         BUG_ON(ret);
985
986         free_extent_buffer(eb);
987         kfree(fs_info->log_root_tree);
988         fs_info->log_root_tree = NULL;
989         return 0;
990 }
991
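/*
 * allocate a root for use as a tree log.  Log roots are short lived:
 * they are tracked through the super block rather than the tree of
 * tree roots, and they are thrown away once the log is synced or the
 * transaction commits.
 */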
992 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
993                                          struct btrfs_fs_info *fs_info)
994 {
995         struct btrfs_root *root;
996         struct btrfs_root *tree_root = fs_info->tree_root;
997         struct extent_buffer *leaf;
998
999         root = kzalloc(sizeof(*root), GFP_NOFS);
1000         if (!root)
1001                 return ERR_PTR(-ENOMEM);
1002
1003         __setup_root(tree_root->nodesize, tree_root->leafsize,
1004                      tree_root->sectorsize, tree_root->stripesize,
1005                      root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1006
1007         root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
1008         root->root_key.type = BTRFS_ROOT_ITEM_KEY;
1009         root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
1010         /*
1011          * log trees do not get reference counted because they go away
1012          * before a real commit is actually done.  They do store pointers
1013          * to file data extents, and those reference counts still get
1014          * updated (along with back refs to the log tree).
1015          */
1016         root->ref_cows = 0;
1017
1018         leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
1019                                       0, BTRFS_TREE_LOG_OBJECTID,
1020                                       trans->transid, 0, 0, 0);
1021         if (IS_ERR(leaf)) {
1022                 kfree(root);
1023                 return ERR_CAST(leaf);
1024         }
1025
1026         root->node = leaf;
1027         btrfs_set_header_nritems(root->node, 0);
1028         btrfs_set_header_level(root->node, 0);
1029         btrfs_set_header_bytenr(root->node, root->node->start);
1030         btrfs_set_header_generation(root->node, trans->transid);
1031         btrfs_set_header_owner(root->node, BTRFS_TREE_LOG_OBJECTID);
1032
1033         write_extent_buffer(root->node, root->fs_info->fsid,
1034                             (unsigned long)btrfs_header_fsid(root->node),
1035                             BTRFS_FSID_SIZE);
1036         btrfs_mark_buffer_dirty(root->node);
1037         btrfs_tree_unlock(root->node);
1038         return root;
1039 }
1040
1041 int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
1042                              struct btrfs_fs_info *fs_info)
1043 {
1044         struct btrfs_root *log_root;
1045
1046         log_root = alloc_log_tree(trans, fs_info);
1047         if (IS_ERR(log_root))
1048                 return PTR_ERR(log_root);
1049         WARN_ON(fs_info->log_root_tree);
1050         fs_info->log_root_tree = log_root;
1051         return 0;
1052 }
1053
1054 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
1055                        struct btrfs_root *root)
1056 {
1057         struct btrfs_root *log_root;
1058         struct btrfs_inode_item *inode_item;
1059
1060         log_root = alloc_log_tree(trans, root->fs_info);
1061         if (IS_ERR(log_root))
1062                 return PTR_ERR(log_root);
1063
1064         log_root->last_trans = trans->transid;
1065         log_root->root_key.offset = root->root_key.objectid;
1066
1067         inode_item = &log_root->root_item.inode;
1068         inode_item->generation = cpu_to_le64(1);
1069         inode_item->size = cpu_to_le64(3);
1070         inode_item->nlink = cpu_to_le32(1);
1071         inode_item->nbytes = cpu_to_le64(root->leafsize);
1072         inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
1073
1074         btrfs_set_root_bytenr(&log_root->root_item, log_root->node->start);
1075         btrfs_set_root_generation(&log_root->root_item, trans->transid);
1076
1077         WARN_ON(root->log_root);
1078         root->log_root = log_root;
1079         root->log_transid = 0;
1080         return 0;
1081 }
1082
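/*
 * read a root from the tree of tree roots.  A key offset of (u64)-1
 * means the caller wants the most recent version of the root item;
 * anything else is looked up as an exact key.
 */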
1083 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1084                                                struct btrfs_key *location)
1085 {
1086         struct btrfs_root *root;
1087         struct btrfs_fs_info *fs_info = tree_root->fs_info;
1088         struct btrfs_path *path;
1089         struct extent_buffer *l;
1090         u64 highest_inode;
1091         u64 generation;
1092         u32 blocksize;
1093         int ret = 0;
1094
1095         root = kzalloc(sizeof(*root), GFP_NOFS);
1096         if (!root)
1097                 return ERR_PTR(-ENOMEM);
1098         if (location->offset == (u64)-1) {
1099                 ret = find_and_setup_root(tree_root, fs_info,
1100                                           location->objectid, root);
1101                 if (ret) {
1102                         kfree(root);
1103                         return ERR_PTR(ret);
1104                 }
1105                 goto insert;
1106         }
1107
1108         __setup_root(tree_root->nodesize, tree_root->leafsize,
1109                      tree_root->sectorsize, tree_root->stripesize,
1110                      root, fs_info, location->objectid);
1111
1112         path = btrfs_alloc_path();
1113         BUG_ON(!path);
1114         ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
1115         if (ret != 0) {
1116                 if (ret > 0)
1117                         ret = -ENOENT;
1118                 goto out;
1119         }
1120         l = path->nodes[0];
1121         read_extent_buffer(l, &root->root_item,
1122                btrfs_item_ptr_offset(l, path->slots[0]),
1123                sizeof(root->root_item));
1124         memcpy(&root->root_key, location, sizeof(*location));
1125         ret = 0;
1126 out:
1127         btrfs_release_path(root, path);
1128         btrfs_free_path(path);
1129         if (ret) {
1130                 kfree(root);
1131                 return ERR_PTR(ret);
1132         }
1133         generation = btrfs_root_generation(&root->root_item);
1134         blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
1135         root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
1136                                      blocksize, generation);
1137         BUG_ON(!root->node);
1138 insert:
1139         if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1140                 root->ref_cows = 1;
1141                 ret = btrfs_find_highest_inode(root, &highest_inode);
1142                 if (ret == 0) {
1143                         root->highest_inode = highest_inode;
1144                         root->last_inode_alloc = highest_inode;
1145                 }
1146         }
1147         return root;
1148 }
1149
1150 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
1151                                         u64 root_objectid)
1152 {
1153         struct btrfs_root *root;
1154
1155         if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
1156                 return fs_info->tree_root;
1157         if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
1158                 return fs_info->extent_root;
1159
1160         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1161                                  (unsigned long)root_objectid);
1162         return root;
1163 }
1164
1165 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
1166                                               struct btrfs_key *location)
1167 {
1168         struct btrfs_root *root;
1169         int ret;
1170
1171         if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1172                 return fs_info->tree_root;
1173         if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
1174                 return fs_info->extent_root;
1175         if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
1176                 return fs_info->chunk_root;
1177         if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
1178                 return fs_info->dev_root;
1179         if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
1180                 return fs_info->csum_root;
1181
1182         root = radix_tree_lookup(&fs_info->fs_roots_radix,
1183                                  (unsigned long)location->objectid);
1184         if (root)
1185                 return root;
1186
1187         root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
1188         if (IS_ERR(root))
1189                 return root;
1190
1191         set_anon_super(&root->anon_super, NULL);
1192
1193         ret = radix_tree_insert(&fs_info->fs_roots_radix,
1194                                 (unsigned long)root->root_key.objectid,
1195                                 root);
1196         if (ret) {
1197                 free_extent_buffer(root->node);
1198                 kfree(root);
1199                 return ERR_PTR(ret);
1200         }
1201         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
1202                 ret = btrfs_find_dead_roots(fs_info->tree_root,
1203                                             root->root_key.objectid, root);
1204                 BUG_ON(ret);
1205                 btrfs_orphan_cleanup(root);
1206         }
1207         return root;
1208 }
1209
1210 struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
1211                                       struct btrfs_key *location,
1212                                       const char *name, int namelen)
1213 {
1214         struct btrfs_root *root;
1215         int ret;
1216
1217         root = btrfs_read_fs_root_no_name(fs_info, location);
1218         if (!root)
1219                 return NULL;
1220
1221         if (root->in_sysfs)
1222                 return root;
1223
1224         ret = btrfs_set_root_name(root, name, namelen);
1225         if (ret) {
1226                 free_extent_buffer(root->node);
1227                 kfree(root);
1228                 return ERR_PTR(ret);
1229         }
1230 #if 0
1231         ret = btrfs_sysfs_add_root(root);
1232         if (ret) {
1233                 free_extent_buffer(root->node);
1234                 kfree(root->name);
1235                 kfree(root);
1236                 return ERR_PTR(ret);
1237         }
1238 #endif
1239         root->in_sysfs = 1;
1240         return root;
1241 }
1242
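/*
 * a btrfs filesystem spans many devices, so congestion is reported if
 * any one of the underlying block devices is congested.
 */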
1243 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
1244 {
1245         struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
1246         int ret = 0;
1247         struct btrfs_device *device;
1248         struct backing_dev_info *bdi;
1249
1250         list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1251                 if (!device->bdev)
1252                         continue;
1253                 bdi = blk_get_backing_dev_info(device->bdev);
1254                 if (bdi && bdi_congested(bdi, bdi_bits)) {
1255                         ret = 1;
1256                         break;
1257                 }
1258         }
1259         return ret;
1260 }
1261
1262 /*
1263  * this unplugs every device on the box.  It was meant for the page ==
1264  * NULL case, but with the targeted unplug disabled it runs every time
1265  */
1266 static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1267 {
1268         struct btrfs_device *device;
1269         struct btrfs_fs_info *info;
1270
1271         info = (struct btrfs_fs_info *)bdi->unplug_io_data;
1272         list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
1273                 if (!device->bdev)
1274                         continue;
1275
1276                 bdi = blk_get_backing_dev_info(device->bdev);
1277                 if (bdi->unplug_io_fn)
1278                         bdi->unplug_io_fn(bdi, page);
1279         }
1280 }
1281
1282 static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1283 {
1284         struct inode *inode;
1285         struct extent_map_tree *em_tree;
1286         struct extent_map *em;
1287         struct address_space *mapping;
1288         u64 offset;
1289
1290         /* page may be NULL (O_DIRECT reads); the "1 ||" forces the unplug-everything path */
1291         if (1 || !page) {
1292                 __unplug_io_fn(bdi, page);
1293                 return;
1294         }
1295
1296         /*
1297          * page->mapping may change at any time.  Get a consistent copy
1298          * and use that for everything below
1299          */
1300         smp_mb();
1301         mapping = page->mapping;
1302         if (!mapping)
1303                 return;
1304
1305         inode = mapping->host;
1306
1307         /*
1308          * don't do the expensive searching for a small number of
1309          * devices
1310          */
1311         if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
1312                 __unplug_io_fn(bdi, page);
1313                 return;
1314         }
1315
1316         offset = page_offset(page);
1317
1318         em_tree = &BTRFS_I(inode)->extent_tree;
1319         spin_lock(&em_tree->lock);
1320         em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
1321         spin_unlock(&em_tree->lock);
1322         if (!em) {
1323                 __unplug_io_fn(bdi, page);
1324                 return;
1325         }
1326
1327         if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1328                 free_extent_map(em);
1329                 __unplug_io_fn(bdi, page);
1330                 return;
1331         }
1332         offset = offset - em->start;
1333         btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
1334                           em->block_start + offset, page);
1335         free_extent_map(em);
1336 }
1337
1338 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1339 {
1340         bdi_init(bdi);
1341         bdi->ra_pages   = default_backing_dev_info.ra_pages;
1342         bdi->state              = 0;
1343         bdi->capabilities       = default_backing_dev_info.capabilities;
1344         bdi->unplug_io_fn       = btrfs_unplug_io_fn;
1345         bdi->unplug_io_data     = info;
1346         bdi->congested_fn       = btrfs_congested_fn;
1347         bdi->congested_data     = info;
1348         return 0;
1349 }
1350
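/*
 * a tree block larger than a page can span multiple bios.  This checks
 * whether the whole block is either contained in this bio or already
 * up to date, so the csum verification isn't run on a partial block.
 */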
1351 static int bio_ready_for_csum(struct bio *bio)
1352 {
1353         u64 length = 0;
1354         u64 buf_len = 0;
1355         u64 start = 0;
1356         struct page *page;
1357         struct extent_io_tree *io_tree = NULL;
1358         struct btrfs_fs_info *info = NULL;
1359         struct bio_vec *bvec;
1360         int i;
1361         int ret;
1362
1363         bio_for_each_segment(bvec, bio, i) {
1364                 page = bvec->bv_page;
1365                 if (page->private == EXTENT_PAGE_PRIVATE) {
1366                         length += bvec->bv_len;
1367                         continue;
1368                 }
1369                 if (!page->private) {
1370                         length += bvec->bv_len;
1371                         continue;
1372                 }
1373                 length = bvec->bv_len;
1374                 buf_len = page->private >> 2;
1375                 start = page_offset(page) + bvec->bv_offset;
1376                 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1377                 info = BTRFS_I(page->mapping->host)->root->fs_info;
1378         }
1379         /* are we fully contained in this bio? */
1380         if (buf_len <= length)
1381                 return 1;
1382
1383         ret = extent_range_uptodate(io_tree, start + length,
1384                                     start + buf_len - 1);
1385         return ret;
1386 }
1387
1388 /*
1389  * called by the kthread helper functions to finally call the bio end_io
1390  * functions.  This is where read checksum verification actually happens
1391  */
1392 static void end_workqueue_fn(struct btrfs_work *work)
1393 {
1394         struct bio *bio;
1395         struct end_io_wq *end_io_wq;
1396         struct btrfs_fs_info *fs_info;
1397         int error;
1398
1399         end_io_wq = container_of(work, struct end_io_wq, work);
1400         bio = end_io_wq->bio;
1401         fs_info = end_io_wq->info;
1402
1403         /* metadata bio reads are special because the whole tree block must
1404          * be checksummed at once.  This makes sure the entire block is in
1405          * ram and up to date before trying to verify things.  For
1406          * blocksize <= pagesize, it is basically a noop
1407          */
1408         if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
1409             !bio_ready_for_csum(bio)) {
1410                 btrfs_queue_worker(&fs_info->endio_meta_workers,
1411                                    &end_io_wq->work);
1412                 return;
1413         }
1414         error = end_io_wq->error;
1415         bio->bi_private = end_io_wq->private;
1416         bio->bi_end_io = end_io_wq->end_io;
1417         kfree(end_io_wq);
1418         bio_endio(bio, error);
1419 }
1420
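/*
 * the cleaner kthread drops dead roots and old snapshots in the
 * background.  It sleeps until the transaction kthread (or unmount)
 * wakes it.
 */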
1421 static int cleaner_kthread(void *arg)
1422 {
1423         struct btrfs_root *root = arg;
1424
1425         do {
1426                 smp_mb();
1427                 if (root->fs_info->closing)
1428                         break;
1429
1430                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1431                 mutex_lock(&root->fs_info->cleaner_mutex);
1432                 btrfs_clean_old_snapshots(root);
1433                 mutex_unlock(&root->fs_info->cleaner_mutex);
1434
1435                 if (freezing(current)) {
1436                         refrigerator();
1437                 } else {
1438                         smp_mb();
1439                         if (root->fs_info->closing)
1440                                 break;
1441                         set_current_state(TASK_INTERRUPTIBLE);
1442                         schedule();
1443                         __set_current_state(TASK_RUNNING);
1444                 }
1445         } while (!kthread_should_stop());
1446         return 0;
1447 }
1448
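/*
 * the transaction kthread forces a commit of the running transaction
 * once it is about 30 seconds old, so the amount of uncommitted
 * metadata stays bounded even when nothing calls sync.
 */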
1449 static int transaction_kthread(void *arg)
1450 {
1451         struct btrfs_root *root = arg;
1452         struct btrfs_trans_handle *trans;
1453         struct btrfs_transaction *cur;
1454         unsigned long now;
1455         unsigned long delay;
1456         int ret;
1457
1458         do {
1459                 smp_mb();
1460                 if (root->fs_info->closing)
1461                         break;
1462
1463                 delay = HZ * 30;
1464                 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1465                 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1466
1467                 mutex_lock(&root->fs_info->trans_mutex);
1468                 cur = root->fs_info->running_transaction;
1469                 if (!cur) {
1470                         mutex_unlock(&root->fs_info->trans_mutex);
1471                         goto sleep;
1472                 }
1473
1474                 now = get_seconds();
1475                 if (now < cur->start_time || now - cur->start_time < 30) {
1476                         mutex_unlock(&root->fs_info->trans_mutex);
1477                         delay = HZ * 5;
1478                         goto sleep;
1479                 }
1480                 mutex_unlock(&root->fs_info->trans_mutex);
1481                 trans = btrfs_start_transaction(root, 1);
1482                 ret = btrfs_commit_transaction(trans, root);
1483
1484 sleep:
1485                 wake_up_process(root->fs_info->cleaner_kthread);
1486                 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1487
1488                 if (freezing(current)) {
1489                         refrigerator();
1490                 } else {
1491                         if (root->fs_info->closing)
1492                                 break;
1493                         set_current_state(TASK_INTERRUPTIBLE);
1494                         schedule_timeout(delay);
1495                         __set_current_state(TASK_RUNNING);
1496                 }
1497         } while (!kthread_should_stop());
1498         return 0;
1499 }
1500
1501 struct btrfs_root *open_ctree(struct super_block *sb,
1502                               struct btrfs_fs_devices *fs_devices,
1503                               char *options)
1504 {
1505         u32 sectorsize;
1506         u32 nodesize;
1507         u32 leafsize;
1508         u32 blocksize;
1509         u32 stripesize;
1510         u64 generation;
1511         u64 features;
1512         struct btrfs_key location;
1513         struct buffer_head *bh;
1514         struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
1515                                                  GFP_NOFS);
1516         struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1517                                                  GFP_NOFS);
1518         struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
1519                                                GFP_NOFS);
1520         struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1521                                                 GFP_NOFS);
1522         struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1523                                                 GFP_NOFS);
1524         struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
1525                                               GFP_NOFS);
1526         struct btrfs_root *log_tree_root;
1527
1528         int ret;
1529         int err = -EINVAL;
1530
1531         struct btrfs_super_block *disk_super;
1532
1533         if (!extent_root || !tree_root || !fs_info ||
1534             !chunk_root || !dev_root || !csum_root) {
1535                 err = -ENOMEM;
1536                 goto fail;
1537         }
1538         INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
1539         INIT_LIST_HEAD(&fs_info->trans_list);
1540         INIT_LIST_HEAD(&fs_info->dead_roots);
1541         INIT_LIST_HEAD(&fs_info->hashers);
1542         INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1543         INIT_LIST_HEAD(&fs_info->ordered_operations);
1544         spin_lock_init(&fs_info->delalloc_lock);
1545         spin_lock_init(&fs_info->new_trans_lock);
1546         spin_lock_init(&fs_info->ref_cache_lock);
1547
1548         init_completion(&fs_info->kobj_unregister);
1549         fs_info->tree_root = tree_root;
1550         fs_info->extent_root = extent_root;
1551         fs_info->csum_root = csum_root;
1552         fs_info->chunk_root = chunk_root;
1553         fs_info->dev_root = dev_root;
1554         fs_info->fs_devices = fs_devices;
1555         INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1556         INIT_LIST_HEAD(&fs_info->space_info);
1557         btrfs_mapping_init(&fs_info->mapping_tree);
1558         atomic_set(&fs_info->nr_async_submits, 0);
1559         atomic_set(&fs_info->async_delalloc_pages, 0);
1560         atomic_set(&fs_info->async_submit_draining, 0);
1561         atomic_set(&fs_info->nr_async_bios, 0);
1562         atomic_set(&fs_info->throttles, 0);
1563         atomic_set(&fs_info->throttle_gen, 0);
1564         fs_info->sb = sb;
1565         fs_info->max_extent = (u64)-1;
1566         fs_info->max_inline = 8192 * 1024;
1567         setup_bdi(fs_info, &fs_info->bdi);
1568         fs_info->btree_inode = new_inode(sb);
1569         fs_info->btree_inode->i_ino = 1;
1570         fs_info->btree_inode->i_nlink = 1;
1571         fs_info->metadata_ratio = 8;
1572
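        /* scale the worker thread pools with the CPU count, capped at 8 */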
1573         fs_info->thread_pool_size = min_t(unsigned long,
1574                                           num_online_cpus() + 2, 8);
1575
1576         INIT_LIST_HEAD(&fs_info->ordered_extents);
1577         spin_lock_init(&fs_info->ordered_extent_lock);
1578
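        /*
         * use a 4k blocksize for the initial super block read; the real
         * sectorsize from the disk super is applied further down
         */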
1579         sb->s_blocksize = 4096;
1580         sb->s_blocksize_bits = blksize_bits(4096);
1581
1582         /*
1583          * we set the i_size on the btree inode to the max possible offset.
1584          * the real end of the address space is determined by all of
1585          * the devices in the system
1586          */
1587         fs_info->btree_inode->i_size = OFFSET_MAX;
1588         fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1589         fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1590
1591         extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1592                              fs_info->btree_inode->i_mapping,
1593                              GFP_NOFS);
1594         extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1595                              GFP_NOFS);
1596
1597         BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1598
1599         spin_lock_init(&fs_info->block_group_cache_lock);
1600         fs_info->block_group_cache_tree.rb_node = NULL;
1601
1602         extent_io_tree_init(&fs_info->pinned_extents,
1603                              fs_info->btree_inode->i_mapping, GFP_NOFS);
1604         fs_info->do_barriers = 1;
1605
1606         INIT_LIST_HEAD(&fs_info->dead_reloc_roots);
1607         btrfs_leaf_ref_tree_init(&fs_info->reloc_ref_tree);
1608         btrfs_leaf_ref_tree_init(&fs_info->shared_ref_tree);
1609
1610         BTRFS_I(fs_info->btree_inode)->root = tree_root;
1611         memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1612                sizeof(struct btrfs_key));
1613         insert_inode_hash(fs_info->btree_inode);
1614
1615         mutex_init(&fs_info->trans_mutex);
1616         mutex_init(&fs_info->ordered_operations_mutex);
1617         mutex_init(&fs_info->tree_log_mutex);
1618         mutex_init(&fs_info->drop_mutex);
1619         mutex_init(&fs_info->chunk_mutex);
1620         mutex_init(&fs_info->transaction_kthread_mutex);
1621         mutex_init(&fs_info->cleaner_mutex);
1622         mutex_init(&fs_info->volume_mutex);
1623         mutex_init(&fs_info->tree_reloc_mutex);
1624
1625         btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
1626         btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
1627
1628         init_waitqueue_head(&fs_info->transaction_throttle);
1629         init_waitqueue_head(&fs_info->transaction_wait);
1630         init_waitqueue_head(&fs_info->async_submit_wait);
1631
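        /*
         * set up the tree root with placeholder 4k sizes; the real
         * nodesize, leafsize and sectorsize are copied in from the super
         * block once it has been read below
         */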
1632         __setup_root(4096, 4096, 4096, 4096, tree_root,
1633                      fs_info, BTRFS_ROOT_TREE_OBJECTID);
1634
1635
1636         bh = btrfs_read_dev_super(fs_devices->latest_bdev);
1637         if (!bh)
1638                 goto fail_iput;
1639
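        /*
         * keep two in-memory copies of the super block: super_copy tracks
         * the live FS, super_for_commit is the scratch copy written back
         * out by write_all_supers at commit time
         */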
1640         memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1641         memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
1642                sizeof(fs_info->super_for_commit));
1643         brelse(bh);
1644
1645         memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1646
1647         disk_super = &fs_info->super_copy;
1648         if (!btrfs_super_root(disk_super))
1649                 goto fail_iput;
1650
1651         ret = btrfs_parse_options(tree_root, options);
1652         if (ret) {
1653                 err = ret;
1654                 goto fail_iput;
1655         }
1656
1657         features = btrfs_super_incompat_flags(disk_super) &
1658                 ~BTRFS_FEATURE_INCOMPAT_SUPP;
1659         if (features) {
1660                 printk(KERN_ERR "BTRFS: couldn't mount because of "
1661                        "unsupported optional features (%Lx).\n",
1662                        features);
1663                 err = -EINVAL;
1664                 goto fail_iput;
1665         }
1666
1667         features = btrfs_super_compat_ro_flags(disk_super) &
1668                 ~BTRFS_FEATURE_COMPAT_RO_SUPP;
1669         if (!(sb->s_flags & MS_RDONLY) && features) {
1670                 printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
1671                        "unsupported optional features (%Lx).\n",
1672                        features);
1673                 err = -EINVAL;
1674                 goto fail_iput;
1675         }
1676
1677         /*
1678          * we need to start all the end_io workers up front because the
1679          * queue work function gets called at interrupt time, and so it
1680          * cannot dynamically grow.
1681          */
1682         btrfs_init_workers(&fs_info->workers, "worker",
1683                            fs_info->thread_pool_size);
1684
1685         btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
1686                            fs_info->thread_pool_size);
1687
1688         btrfs_init_workers(&fs_info->submit_workers, "submit",
1689                            min_t(u64, fs_devices->num_devices,
1690                            fs_info->thread_pool_size));
1691
1692         /* a higher idle thresh on the submit workers makes it much more
1693          * likely that bios will be sent down in a sane order to the
1694          * devices
1695          */
1696         fs_info->submit_workers.idle_thresh = 64;
1697
1698         fs_info->workers.idle_thresh = 16;
1699         fs_info->workers.ordered = 1;
1700
1701         fs_info->delalloc_workers.idle_thresh = 2;
1702         fs_info->delalloc_workers.ordered = 1;
1703
1704         btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
1705         btrfs_init_workers(&fs_info->endio_workers, "endio",
1706                            fs_info->thread_pool_size);
1707         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
1708                            fs_info->thread_pool_size);
1709         btrfs_init_workers(&fs_info->endio_meta_write_workers,
1710                            "endio-meta-write", fs_info->thread_pool_size);
1711         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1712                            fs_info->thread_pool_size);
1713
1714         /*
1715          * endios are largely parallel and should have a very
1716          * low idle thresh
1717          */
1718         fs_info->endio_workers.idle_thresh = 4;
1719         fs_info->endio_meta_workers.idle_thresh = 4;
1720
1721         fs_info->endio_write_workers.idle_thresh = 64;
1722         fs_info->endio_meta_write_workers.idle_thresh = 64;
1723
1724         btrfs_start_workers(&fs_info->workers, 1);
1725         btrfs_start_workers(&fs_info->submit_workers, 1);
1726         btrfs_start_workers(&fs_info->delalloc_workers, 1);
1727         btrfs_start_workers(&fs_info->fixup_workers, 1);
1728         btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
1729         btrfs_start_workers(&fs_info->endio_meta_workers,
1730                             fs_info->thread_pool_size);
1731         btrfs_start_workers(&fs_info->endio_meta_write_workers,
1732                             fs_info->thread_pool_size);
1733         btrfs_start_workers(&fs_info->endio_write_workers,
1734                             fs_info->thread_pool_size);
1735
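        /* scale readahead with the number of devices, but do at least 4MB */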
1736         fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1737         fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
1738                                     4 * 1024 * 1024 / PAGE_CACHE_SIZE);
1739
1740         nodesize = btrfs_super_nodesize(disk_super);
1741         leafsize = btrfs_super_leafsize(disk_super);
1742         sectorsize = btrfs_super_sectorsize(disk_super);
1743         stripesize = btrfs_super_stripesize(disk_super);
1744         tree_root->nodesize = nodesize;
1745         tree_root->leafsize = leafsize;
1746         tree_root->sectorsize = sectorsize;
1747         tree_root->stripesize = stripesize;
1748
1749         sb->s_blocksize = sectorsize;
1750         sb->s_blocksize_bits = blksize_bits(sectorsize);
1751
1752         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1753                     sizeof(disk_super->magic))) {
1754                 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
1755                 goto fail_sb_buffer;
1756         }
1757
1758         mutex_lock(&fs_info->chunk_mutex);
1759         ret = btrfs_read_sys_array(tree_root);
1760         mutex_unlock(&fs_info->chunk_mutex);
1761         if (ret) {
1762                 printk(KERN_WARNING "btrfs: failed to read the system "
1763                        "array on %s\n", sb->s_id);
1764                 goto fail_sys_array;
1765         }
1766
1767         blocksize = btrfs_level_size(tree_root,
1768                                      btrfs_super_chunk_root_level(disk_super));
1769         generation = btrfs_super_chunk_root_generation(disk_super);
1770
1771         __setup_root(nodesize, leafsize, sectorsize, stripesize,
1772                      chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1773
1774         chunk_root->node = read_tree_block(chunk_root,
1775                                            btrfs_super_chunk_root(disk_super),
1776                                            blocksize, generation);
1777         BUG_ON(!chunk_root->node);
1778
1779         read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1780            (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1781            BTRFS_UUID_SIZE);
1782
1783         mutex_lock(&fs_info->chunk_mutex);
1784         ret = btrfs_read_chunk_tree(chunk_root);
1785         mutex_unlock(&fs_info->chunk_mutex);
1786         if (ret) {
1787                 printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
1788                        sb->s_id);
1789                 goto fail_chunk_root;
1790         }
1791
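        /*
         * now that the chunk tree is read we know which devices belong to
         * this FS, so any extra devices found during the scan can be
         * released
         */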
1792         btrfs_close_extra_devices(fs_devices);
1793
1794         blocksize = btrfs_level_size(tree_root,
1795                                      btrfs_super_root_level(disk_super));
1796         generation = btrfs_super_generation(disk_super);
1797
1798         tree_root->node = read_tree_block(tree_root,
1799                                           btrfs_super_root(disk_super),
1800                                           blocksize, generation);
1801         if (!tree_root->node)
1802                 goto fail_chunk_root;
1803
1804
1805         ret = find_and_setup_root(tree_root, fs_info,
1806                                   BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1807         if (ret)
1808                 goto fail_tree_root;
1809         extent_root->track_dirty = 1;
1810
1811         ret = find_and_setup_root(tree_root, fs_info,
1812                                   BTRFS_DEV_TREE_OBJECTID, dev_root);
1813         dev_root->track_dirty = 1;
1814         if (ret)
1815                 goto fail_extent_root;
1816
1817         ret = find_and_setup_root(tree_root, fs_info,
1818                                   BTRFS_CSUM_TREE_OBJECTID, csum_root);
1819         if (ret)
1820                 goto fail_extent_root;
1821
1822         csum_root->track_dirty = 1;
1823
1824         btrfs_read_block_groups(extent_root);
1825
1826         fs_info->generation = generation;
1827         fs_info->last_trans_committed = generation;
1828         fs_info->data_alloc_profile = (u64)-1;
1829         fs_info->metadata_alloc_profile = (u64)-1;
1830         fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1831         fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1832                                                "btrfs-cleaner");
1833         if (IS_ERR(fs_info->cleaner_kthread))
1834                 goto fail_csum_root;
1835
1836         fs_info->transaction_kthread = kthread_run(transaction_kthread,
1837                                                    tree_root,
1838                                                    "btrfs-transaction");
1839         if (IS_ERR(fs_info->transaction_kthread))
1840                 goto fail_cleaner;
1841
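        /*
         * a non-zero log root in the super block means the FS went down
         * with a dirty tree log that must be replayed before the mount
         * can finish
         */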
1842         if (btrfs_super_log_root(disk_super) != 0) {
1843                 u64 bytenr = btrfs_super_log_root(disk_super);
1844
1845                 if (fs_devices->rw_devices == 0) {
1846                         printk(KERN_WARNING "Btrfs log replay required "
1847                                "on RO media\n");
1848                         err = -EIO;
1849                         goto fail_trans_kthread;
1850                 }
1851                 blocksize =
1852                      btrfs_level_size(tree_root,
1853                                       btrfs_super_log_root_level(disk_super));
1854
1855                 log_tree_root = kzalloc(sizeof(struct btrfs_root),
1856                                                       GFP_NOFS);
1857
1858                 __setup_root(nodesize, leafsize, sectorsize, stripesize,
1859                              log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
1860
1861                 log_tree_root->node = read_tree_block(tree_root, bytenr,
1862                                                       blocksize,
1863                                                       generation + 1);
1864                 ret = btrfs_recover_log_trees(log_tree_root);
1865                 BUG_ON(ret);
1866
1867                 if (sb->s_flags & MS_RDONLY) {
1868                         ret = btrfs_commit_super(tree_root);
1869                         BUG_ON(ret);
1870                 }
1871         }
1872
1873         if (!(sb->s_flags & MS_RDONLY)) {
1874                 ret = btrfs_cleanup_reloc_trees(tree_root);
1875                 BUG_ON(ret);
1876         }
1877
1878         location.objectid = BTRFS_FS_TREE_OBJECTID;
1879         location.type = BTRFS_ROOT_ITEM_KEY;
1880         location.offset = (u64)-1;
1881
1882         fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
1883         if (!fs_info->fs_root)
1884                 goto fail_trans_kthread;
1885         return tree_root;
1886
1887 fail_trans_kthread:
1888         kthread_stop(fs_info->transaction_kthread);
1889 fail_cleaner:
1890         kthread_stop(fs_info->cleaner_kthread);
1891
1892         /*
1893          * make sure we're done with the btree inode before we stop our
1894          * kthreads
1895          */
1896         filemap_write_and_wait(fs_info->btree_inode->i_mapping);
1897         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1898
1899 fail_csum_root:
1900         free_extent_buffer(csum_root->node);
1901 fail_extent_root:
1902         free_extent_buffer(extent_root->node);
1903 fail_tree_root:
1904         free_extent_buffer(tree_root->node);
1905 fail_chunk_root:
1906         free_extent_buffer(chunk_root->node);
1907 fail_sys_array:
1908         free_extent_buffer(dev_root->node);
1909 fail_sb_buffer:
1910         btrfs_stop_workers(&fs_info->fixup_workers);
1911         btrfs_stop_workers(&fs_info->delalloc_workers);
1912         btrfs_stop_workers(&fs_info->workers);
1913         btrfs_stop_workers(&fs_info->endio_workers);
1914         btrfs_stop_workers(&fs_info->endio_meta_workers);
1915         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
1916         btrfs_stop_workers(&fs_info->endio_write_workers);
1917         btrfs_stop_workers(&fs_info->submit_workers);
1918 fail_iput:
1919         invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
1920         iput(fs_info->btree_inode);
1921
1922         btrfs_close_devices(fs_info->fs_devices);
1923         btrfs_mapping_tree_free(&fs_info->mapping_tree);
1924         bdi_destroy(&fs_info->bdi);
1925
1926 fail:
1927         kfree(extent_root);
1928         kfree(tree_root);
1929         kfree(fs_info);
1930         kfree(chunk_root);
1931         kfree(dev_root);
1932         kfree(csum_root);
1933         return ERR_PTR(err);
1934 }
1935
1936 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1937 {
1938         char b[BDEVNAME_SIZE];
1939
1940         if (uptodate) {
1941                 set_buffer_uptodate(bh);
1942         } else {
1943                 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
1944                         printk(KERN_WARNING "lost page write due to "
1945                                         "I/O error on %s\n",
1946                                        bdevname(bh->b_bdev, b));
1947                 }
1948                 /* note, we don't set_buffer_write_io_error because we have
1949                  * our own ways of dealing with the IO errors
1950                  */
1951                 clear_buffer_uptodate(bh);
1952         }
1953         unlock_buffer(bh);
1954         put_bh(bh);
1955 }
1956
1957 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
1958 {
1959         struct buffer_head *bh;
1960         struct buffer_head *latest = NULL;
1961         struct btrfs_super_block *super;
1962         int i;
1963         u64 transid = 0;
1964         u64 bytenr;
1965
1966         /* we would like to check all the supers, but that would make
1967          * a btrfs mount succeed after a mkfs from a different FS.
1968          * So, we need to add a special mount option to scan for
1969          * later supers, using BTRFS_SUPER_MIRROR_MAX instead of 1
1970          */
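        /*
         * the super block copies live at fixed offsets: 64k for the
         * primary, then 64M and 256G for the mirrors (see btrfs_sb_offset)
         */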
1971         for (i = 0; i < 1; i++) {
1972                 bytenr = btrfs_sb_offset(i);
1973                 if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
1974                         break;
1975                 bh = __bread(bdev, bytenr / 4096, 4096);
1976                 if (!bh)
1977                         continue;
1978
1979                 super = (struct btrfs_super_block *)bh->b_data;
1980                 if (btrfs_super_bytenr(super) != bytenr ||
1981                     strncmp((char *)(&super->magic), BTRFS_MAGIC,
1982                             sizeof(super->magic))) {
1983                         brelse(bh);
1984                         continue;
1985                 }
1986
1987                 if (!latest || btrfs_super_generation(super) > transid) {
1988                         brelse(latest);
1989                         latest = bh;
1990                         transid = btrfs_super_generation(super);
1991                 } else {
1992                         brelse(bh);
1993                 }
1994         }
1995         return latest;
1996 }
1997
1998 static int write_dev_supers(struct btrfs_device *device,
1999                             struct btrfs_super_block *sb,
2000                             int do_barriers, int wait, int max_mirrors)
2001 {
2002         struct buffer_head *bh;
2003         int i;
2004         int ret;
2005         int errors = 0;
2006         u32 crc;
2007         u64 bytenr;
2008         int last_barrier = 0;
2009
2010         if (max_mirrors == 0)
2011                 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
2012
2013         /* make sure only the last submit_bh does a barrier */
2014         if (do_barriers) {
2015                 for (i = 0; i < max_mirrors; i++) {
2016                         bytenr = btrfs_sb_offset(i);
2017                         if (bytenr + BTRFS_SUPER_INFO_SIZE >=
2018                             device->total_bytes)
2019                                 break;
2020                         last_barrier = i;
2021                 }
2022         }
2023
2024         for (i = 0; i < max_mirrors; i++) {
2025                 bytenr = btrfs_sb_offset(i);
2026                 if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
2027                         break;
2028
2029                 if (wait) {
2030                         bh = __find_get_block(device->bdev, bytenr / 4096,
2031                                               BTRFS_SUPER_INFO_SIZE);
2032                         BUG_ON(!bh);
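                        /*
                         * drop the extra reference from __find_get_block;
                         * the reference left over from the wait == 0
                         * submission still pins this bh
                         */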
2033                         brelse(bh);
2034                         wait_on_buffer(bh);
2035                         if (buffer_uptodate(bh)) {
2036                                 brelse(bh);
2037                                 continue;
2038                         }
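                        /* not uptodate: fall through and resubmit this copy */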
2039                 } else {
2040                         btrfs_set_super_bytenr(sb, bytenr);
2041
2042                         crc = ~(u32)0;
2043                         crc = btrfs_csum_data(NULL, (char *)sb +
2044                                               BTRFS_CSUM_SIZE, crc,
2045                                               BTRFS_SUPER_INFO_SIZE -
2046                                               BTRFS_CSUM_SIZE);
2047                         btrfs_csum_final(crc, sb->csum);
2048
2049                         bh = __getblk(device->bdev, bytenr / 4096,
2050                                       BTRFS_SUPER_INFO_SIZE);
2051                         memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
2052
2053                         set_buffer_uptodate(bh);
2054                         get_bh(bh);
2055                         lock_buffer(bh);
2056                         bh->b_end_io = btrfs_end_buffer_write_sync;
2057                 }
2058
2059                 if (i == last_barrier && do_barriers && device->barriers) {
2060                         ret = submit_bh(WRITE_BARRIER, bh);
2061                         if (ret == -EOPNOTSUPP) {
2062                                 printk(KERN_WARNING "btrfs: disabling barriers on dev %s\n",
2063                                        device->name);
2064                                 set_buffer_uptodate(bh);
2065                                 device->barriers = 0;
2066                                 get_bh(bh);
2067                                 lock_buffer(bh);
2068                                 ret = submit_bh(WRITE_SYNC, bh);
2069                         }
2070                 } else {
2071                         ret = submit_bh(WRITE_SYNC, bh);
2072                 }
2073
2074                 if (!ret && wait) {
2075                         wait_on_buffer(bh);
2076                         if (!buffer_uptodate(bh))
2077                                 errors++;
2078                 } else if (ret) {
2079                         errors++;
2080                 }
2081                 if (wait)
2082                         brelse(bh);
2083         }
2084         return errors < i ? 0 : -1;
2085 }
2086
2087 int write_all_supers(struct btrfs_root *root, int max_mirrors)
2088 {
2089         struct list_head *head = &root->fs_info->fs_devices->devices;
2090         struct btrfs_device *dev;
2091         struct btrfs_super_block *sb;
2092         struct btrfs_dev_item *dev_item;
2093         int ret;
2094         int do_barriers;
2095         int max_errors;
2096         int total_errors = 0;
2097         u64 flags;
2098
2099         max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
2100         do_barriers = !btrfs_test_opt(root, NOBARRIER);
2101
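        /*
         * first pass: update the dev_item in the super block for each
         * device and submit the writes without waiting
         */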
2102         sb = &root->fs_info->super_for_commit;
2103         dev_item = &sb->dev_item;
2104         list_for_each_entry(dev, head, dev_list) {
2105                 if (!dev->bdev) {
2106                         total_errors++;
2107                         continue;
2108                 }
2109                 if (!dev->in_fs_metadata || !dev->writeable)
2110                         continue;
2111
2112                 btrfs_set_stack_device_generation(dev_item, 0);
2113                 btrfs_set_stack_device_type(dev_item, dev->type);
2114                 btrfs_set_stack_device_id(dev_item, dev->devid);
2115                 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
2116                 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
2117                 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
2118                 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
2119                 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
2120                 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
2121                 memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);
2122
2123                 flags = btrfs_super_flags(sb);
2124                 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
2125
2126                 ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
2127                 if (ret)
2128                         total_errors++;
2129         }
2130         if (total_errors > max_errors) {
2131                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2132                        total_errors);
2133                 BUG();
2134         }
2135
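        /* second pass: wait for the supers submitted above to finish */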
2136         total_errors = 0;
2137         list_for_each_entry(dev, head, dev_list) {
2138                 if (!dev->bdev)
2139                         continue;
2140                 if (!dev->in_fs_metadata || !dev->writeable)
2141                         continue;
2142
2143                 ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
2144                 if (ret)
2145                         total_errors++;
2146         }
2147         if (total_errors > max_errors) {
2148                 printk(KERN_ERR "btrfs: %d errors while writing supers\n",
2149                        total_errors);
2150                 BUG();
2151         }
2152         return 0;
2153 }
2154
2155 int write_ctree_super(struct btrfs_trans_handle *trans,
2156                       struct btrfs_root *root, int max_mirrors)
2157 {
2158         int ret;
2159
2160         ret = write_all_supers(root, max_mirrors);
2161         return ret;
2162 }
2163
2164 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2165 {
2166         radix_tree_delete(&fs_info->fs_roots_radix,
2167                           (unsigned long)root->root_key.objectid);
2168         if (root->anon_super.s_dev) {
2169                 down_write(&root->anon_super.s_umount);
2170                 kill_anon_super(&root->anon_super);
2171         }
2172         if (root->node)
2173                 free_extent_buffer(root->node);
2174         if (root->commit_root)
2175                 free_extent_buffer(root->commit_root);
2176         kfree(root->name);
2177         kfree(root);
2178         return 0;
2179 }
2180
2181 static int del_fs_roots(struct btrfs_fs_info *fs_info)
2182 {
2183         int ret;
2184         struct btrfs_root *gang[8];
2185         int i;
2186
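        /* drain the radix tree in batches of up to 8 roots at a time */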
2187         while (1) {
2188                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2189                                              (void **)gang, 0,
2190                                              ARRAY_SIZE(gang));
2191                 if (!ret)
2192                         break;
2193                 for (i = 0; i < ret; i++)
2194                         btrfs_free_fs_root(fs_info, gang[i]);
2195         }
2196         return 0;
2197 }
2198
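/*
 * walk all the fs roots, picking up dead roots recorded in the tree and
 * finishing any orphan cleanup that is still pending
 */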
2199 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2200 {
2201         u64 root_objectid = 0;
2202         struct btrfs_root *gang[8];
2203         int i;
2204         int ret;
2205
2206         while (1) {
2207                 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2208                                              (void **)gang, root_objectid,
2209                                              ARRAY_SIZE(gang));
2210                 if (!ret)
2211                         break;
2212                 for (i = 0; i < ret; i++) {
2213                         root_objectid = gang[i]->root_key.objectid;
2214                         ret = btrfs_find_dead_roots(fs_info->tree_root,
2215                                                     root_objectid, gang[i]);
2216                         BUG_ON(ret);
2217                         btrfs_orphan_cleanup(gang[i]);
2218                 }
2219                 root_objectid++;
2220         }
2221         return 0;
2222 }
2223
2224 int btrfs_commit_super(struct btrfs_root *root)
2225 {
2226         struct btrfs_trans_handle *trans;
2227         int ret;
2228
2229         mutex_lock(&root->fs_info->cleaner_mutex);
2230         btrfs_clean_old_snapshots(root);
2231         mutex_unlock(&root->fs_info->cleaner_mutex);
2232         trans = btrfs_start_transaction(root, 1);
2233         ret = btrfs_commit_transaction(trans, root);
2234         BUG_ON(ret);
2235         /* run commit again to drop the original snapshot */
2236         trans = btrfs_start_transaction(root, 1);
2237         btrfs_commit_transaction(trans, root);
2238         ret = btrfs_write_and_wait_transaction(NULL, root);
2239         BUG_ON(ret);
2240
2241         ret = write_ctree_super(NULL, root, 0);
2242         return ret;
2243 }
2244
2245 int close_ctree(struct btrfs_root *root)
2246 {
2247         struct btrfs_fs_info *fs_info = root->fs_info;
2248         int ret;
2249
2250         fs_info->closing = 1;
2251         smp_mb();
2252
2253         kthread_stop(root->fs_info->transaction_kthread);
2254         kthread_stop(root->fs_info->cleaner_kthread);
2255
2256         if (!(fs_info->sb->s_flags & MS_RDONLY)) {
2257                 ret = btrfs_commit_super(root);
2258                 if (ret)
2259                         printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
2260         }
2261
2262         if (fs_info->delalloc_bytes) {
2263                 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
2264                        (unsigned long long)fs_info->delalloc_bytes);
2265         }
2266         if (fs_info->total_ref_cache_size) {
2267                 printk(KERN_INFO "btrfs: at unmount reference cache size %llu\n",
2268                        (unsigned long long)fs_info->total_ref_cache_size);
2269         }
2270
2271         if (fs_info->extent_root->node)
2272                 free_extent_buffer(fs_info->extent_root->node);
2273
2274         if (fs_info->tree_root->node)
2275                 free_extent_buffer(fs_info->tree_root->node);
2276
2277         if (root->fs_info->chunk_root->node)
2278                 free_extent_buffer(root->fs_info->chunk_root->node);
2279
2280         if (root->fs_info->dev_root->node)
2281                 free_extent_buffer(root->fs_info->dev_root->node);
2282
2283         if (root->fs_info->csum_root->node)
2284                 free_extent_buffer(root->fs_info->csum_root->node);
2285
2286         btrfs_free_block_groups(root->fs_info);
2287
2288         del_fs_roots(fs_info);
2289
2290         iput(fs_info->btree_inode);
2291
2292         btrfs_stop_workers(&fs_info->fixup_workers);
2293         btrfs_stop_workers(&fs_info->delalloc_workers);
2294         btrfs_stop_workers(&fs_info->workers);
2295         btrfs_stop_workers(&fs_info->endio_workers);
2296         btrfs_stop_workers(&fs_info->endio_meta_workers);
2297         btrfs_stop_workers(&fs_info->endio_meta_write_workers);
2298         btrfs_stop_workers(&fs_info->endio_write_workers);
2299         btrfs_stop_workers(&fs_info->submit_workers);
2300
2301         btrfs_close_devices(fs_info->fs_devices);
2302         btrfs_mapping_tree_free(&fs_info->mapping_tree);
2303
2304         bdi_destroy(&fs_info->bdi);
2305
2306         kfree(fs_info->extent_root);
2307         kfree(fs_info->tree_root);
2308         kfree(fs_info->chunk_root);
2309         kfree(fs_info->dev_root);
2310         kfree(fs_info->csum_root);
2311         return 0;
2312 }
2313
2314 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
2315 {
2316         int ret;
2317         struct inode *btree_inode = buf->first_page->mapping->host;
2318
2319         ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
2320         if (!ret)
2321                 return ret;
2322
2323         ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
2324                                     parent_transid);
2325         return !ret;
2326 }
2327
2328 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
2329 {
2330         struct inode *btree_inode = buf->first_page->mapping->host;
2331         return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
2332                                           buf);
2333 }
2334
2335 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
2336 {
2337         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2338         u64 transid = btrfs_header_generation(buf);
2339         struct inode *btree_inode = root->fs_info->btree_inode;
2340         int was_dirty;
2341
2342         btrfs_assert_tree_locked(buf);
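        /*
         * a buffer may only be dirtied inside the transaction that is
         * currently running
         */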
2343         if (transid != root->fs_info->generation) {
2344                 printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
2345                        "found %llu running %llu\n",
2346                         (unsigned long long)buf->start,
2347                         (unsigned long long)transid,
2348                         (unsigned long long)root->fs_info->generation);
2349                 WARN_ON(1);
2350         }
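        /*
         * account the newly dirtied bytes; btree_lock_page_hook subtracts
         * them again once the buffer is on its way to disk
         */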
2351         was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
2352                                             buf);
2353         if (!was_dirty) {
2354                 spin_lock(&root->fs_info->delalloc_lock);
2355                 root->fs_info->dirty_metadata_bytes += buf->len;
2356                 spin_unlock(&root->fs_info->delalloc_lock);
2357         }
2358 }
2359
2360 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
2361 {
2362         /*
2363          * looks as though older kernels can get into trouble with
2364          * this code; they end up stuck in balance_dirty_pages forever
2365          */
2366         struct extent_io_tree *tree;
2367         u64 num_dirty;
2368         u64 start = 0;
2369         unsigned long thresh = 32 * 1024 * 1024;
2370         tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
2371
2372         if (current->flags & PF_MEMALLOC)
2373                 return;
2374
2375         num_dirty = count_range_bits(tree, &start, (u64)-1,
2376                                      thresh, EXTENT_DIRTY);
2377         if (num_dirty > thresh) {
2378                 balance_dirty_pages_ratelimited_nr(
2379                                    root->fs_info->btree_inode->i_mapping, 1);
2380         }
2381         return;
2382 }
2383
2384 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
2385 {
2386         struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
2387         int ret;
2388         ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
2389         if (ret == 0)
2390                 set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
2391         return ret;
2392 }
2393
2394 int btree_lock_page_hook(struct page *page)
2395 {
2396         struct inode *inode = page->mapping->host;
2397         struct btrfs_root *root = BTRFS_I(inode)->root;
2398         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2399         struct extent_buffer *eb;
2400         unsigned long len;
2401         u64 bytenr = page_offset(page);
2402
2403         if (page->private == EXTENT_PAGE_PRIVATE)
2404                 goto out;
2405
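        /* the eb length is stored in the upper bits of page->private */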
2406         len = page->private >> 2;
2407         eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
2408         if (!eb)
2409                 goto out;
2410
2411         btrfs_tree_lock(eb);
2412         btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
2413
2414         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
2415                 spin_lock(&root->fs_info->delalloc_lock);
2416                 if (root->fs_info->dirty_metadata_bytes >= eb->len)
2417                         root->fs_info->dirty_metadata_bytes -= eb->len;
2418                 else
2419                         WARN_ON(1);
2420                 spin_unlock(&root->fs_info->delalloc_lock);
2421         }
2422
2423         btrfs_tree_unlock(eb);
2424         free_extent_buffer(eb);
2425 out:
2426         lock_page(page);
2427         return 0;
2428 }
2429
2430 static struct extent_io_ops btree_extent_io_ops = {
2431         .write_cache_pages_lock_hook = btree_lock_page_hook,
2432         .readpage_end_io_hook = btree_readpage_end_io_hook,
2433         .submit_bio_hook = btree_submit_bio_hook,
2434         /* note we're sharing with inode.c for the merge bio hook */
2435         .merge_bio_hook = btrfs_merge_bio_hook,
2436 };