Btrfs: fix oops on page->mapping->host during writepage
fs/btrfs/extent_io.c (linux-2.6)
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/writeback.h>
13 #include <linux/pagevec.h>
14 #include "extent_io.h"
15 #include "extent_map.h"
16 #include "compat.h"
17 #include "ctree.h"
18 #include "btrfs_inode.h"
19
20 /* temporary define until extent_map moves out of btrfs */
21 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
22                                        unsigned long extra_flags,
23                                        void (*ctor)(void *, struct kmem_cache *,
24                                                     unsigned long));
25
26 static struct kmem_cache *extent_state_cache;
27 static struct kmem_cache *extent_buffer_cache;
28
29 static LIST_HEAD(buffers);
30 static LIST_HEAD(states);
31
32 #define LEAK_DEBUG 0
33 #if LEAK_DEBUG
34 static DEFINE_SPINLOCK(leak_lock);
35 #endif
36
37 #define BUFFER_LRU_MAX 64
38
39 struct tree_entry {
40         u64 start;
41         u64 end;
42         struct rb_node rb_node;
43 };
44
45 struct extent_page_data {
46         struct bio *bio;
47         struct extent_io_tree *tree;
48         get_extent_t *get_extent;
49
50         /* tells writepage not to lock the state bits for this range;
51          * it still does the unlocking
52          */
53         unsigned int extent_locked:1;
54
55         /* tells the submit_bio code to use a WRITE_SYNC */
56         unsigned int sync_io:1;
57 };
58
59 int __init extent_io_init(void)
60 {
61         extent_state_cache = btrfs_cache_create("extent_state",
62                                             sizeof(struct extent_state), 0,
63                                             NULL);
64         if (!extent_state_cache)
65                 return -ENOMEM;
66
67         extent_buffer_cache = btrfs_cache_create("extent_buffers",
68                                             sizeof(struct extent_buffer), 0,
69                                             NULL);
70         if (!extent_buffer_cache)
71                 goto free_state_cache;
72         return 0;
73
74 free_state_cache:
75         kmem_cache_destroy(extent_state_cache);
76         return -ENOMEM;
77 }
78
79 void extent_io_exit(void)
80 {
81         struct extent_state *state;
82         struct extent_buffer *eb;
83
84         while (!list_empty(&states)) {
85                 state = list_entry(states.next, struct extent_state, leak_list);
86                 printk(KERN_ERR "btrfs state leak: start %llu end %llu "
87                        "state %lu in tree %p refs %d\n",
88                        (unsigned long long)state->start,
89                        (unsigned long long)state->end,
90                        state->state, state->tree, atomic_read(&state->refs));
91                 list_del(&state->leak_list);
92                 kmem_cache_free(extent_state_cache, state);
93
94         }
95
96         while (!list_empty(&buffers)) {
97                 eb = list_entry(buffers.next, struct extent_buffer, leak_list);
98                 printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
99                        "refs %d\n", (unsigned long long)eb->start,
100                        eb->len, atomic_read(&eb->refs));
101                 list_del(&eb->leak_list);
102                 kmem_cache_free(extent_buffer_cache, eb);
103         }
104         if (extent_state_cache)
105                 kmem_cache_destroy(extent_state_cache);
106         if (extent_buffer_cache)
107                 kmem_cache_destroy(extent_buffer_cache);
108 }
109
110 void extent_io_tree_init(struct extent_io_tree *tree,
111                           struct address_space *mapping, gfp_t mask)
112 {
113         tree->state.rb_node = NULL;
114         tree->buffer.rb_node = NULL;
115         tree->ops = NULL;
116         tree->dirty_bytes = 0;
117         spin_lock_init(&tree->lock);
118         spin_lock_init(&tree->buffer_lock);
119         tree->mapping = mapping;
120 }
121
122 static struct extent_state *alloc_extent_state(gfp_t mask)
123 {
124         struct extent_state *state;
125 #if LEAK_DEBUG
126         unsigned long flags;
127 #endif
128
129         state = kmem_cache_alloc(extent_state_cache, mask);
130         if (!state)
131                 return state;
132         state->state = 0;
133         state->private = 0;
134         state->tree = NULL;
135 #if LEAK_DEBUG
136         spin_lock_irqsave(&leak_lock, flags);
137         list_add(&state->leak_list, &states);
138         spin_unlock_irqrestore(&leak_lock, flags);
139 #endif
140         atomic_set(&state->refs, 1);
141         init_waitqueue_head(&state->wq);
142         return state;
143 }
144
145 static void free_extent_state(struct extent_state *state)
146 {
147         if (!state)
148                 return;
149         if (atomic_dec_and_test(&state->refs)) {
150 #if LEAK_DEBUG
151                 unsigned long flags;
152 #endif
153                 WARN_ON(state->tree);
154 #if LEAK_DEBUG
155                 spin_lock_irqsave(&leak_lock, flags);
156                 list_del(&state->leak_list);
157                 spin_unlock_irqrestore(&leak_lock, flags);
158 #endif
159                 kmem_cache_free(extent_state_cache, state);
160         }
161 }
162
163 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
164                                    struct rb_node *node)
165 {
166         struct rb_node **p = &root->rb_node;
167         struct rb_node *parent = NULL;
168         struct tree_entry *entry;
169
170         while (*p) {
171                 parent = *p;
172                 entry = rb_entry(parent, struct tree_entry, rb_node);
173
174                 if (offset < entry->start)
175                         p = &(*p)->rb_left;
176                 else if (offset > entry->end)
177                         p = &(*p)->rb_right;
178                 else
179                         return parent;
180         }
181
182         entry = rb_entry(node, struct tree_entry, rb_node);
183         rb_link_node(node, parent, p);
184         rb_insert_color(node, root);
185         return NULL;
186 }
187
188 static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
189                                      struct rb_node **prev_ret,
190                                      struct rb_node **next_ret)
191 {
192         struct rb_root *root = &tree->state;
193         struct rb_node *n = root->rb_node;
194         struct rb_node *prev = NULL;
195         struct rb_node *orig_prev = NULL;
196         struct tree_entry *entry;
197         struct tree_entry *prev_entry = NULL;
198
199         while (n) {
200                 entry = rb_entry(n, struct tree_entry, rb_node);
201                 prev = n;
202                 prev_entry = entry;
203
204                 if (offset < entry->start)
205                         n = n->rb_left;
206                 else if (offset > entry->end)
207                         n = n->rb_right;
208                 else
209                         return n;
210         }
211
212         if (prev_ret) {
213                 orig_prev = prev;
214                 while (prev && offset > prev_entry->end) {
215                         prev = rb_next(prev);
216                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
217                 }
218                 *prev_ret = prev;
219                 prev = orig_prev;
220         }
221
222         if (next_ret) {
223                 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
224                 while (prev && offset < prev_entry->start) {
225                         prev = rb_prev(prev);
226                         prev_entry = rb_entry(prev, struct tree_entry, rb_node);
227                 }
228                 *next_ret = prev;
229         }
230         return NULL;
231 }
232
233 static inline struct rb_node *tree_search(struct extent_io_tree *tree,
234                                           u64 offset)
235 {
236         struct rb_node *prev = NULL;
237         struct rb_node *ret;
238
239         ret = __etree_search(tree, offset, &prev, NULL);
240         if (!ret)
241                 return prev;
242         return ret;
243 }
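/*
 * Editorial note (not part of the original source): tree_search() returns
 * the state that contains 'offset', or else the first state that ends at or
 * after 'offset'.  A small worked example, assuming states [0,4095] and
 * [8192,12287] are in the tree:
 *
 *   tree_search(tree, 100)   -> the [0,4095] node (offset falls inside it)
 *   tree_search(tree, 5000)  -> the [8192,12287] node (first one ending
 *                               after 5000)
 *   tree_search(tree, 20000) -> NULL (nothing ends at or after 20000)
 */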
244
245 static struct extent_buffer *buffer_tree_insert(struct extent_io_tree *tree,
246                                           u64 offset, struct rb_node *node)
247 {
248         struct rb_root *root = &tree->buffer;
249         struct rb_node **p = &root->rb_node;
250         struct rb_node *parent = NULL;
251         struct extent_buffer *eb;
252
253         while (*p) {
254                 parent = *p;
255                 eb = rb_entry(parent, struct extent_buffer, rb_node);
256
257                 if (offset < eb->start)
258                         p = &(*p)->rb_left;
259                 else if (offset > eb->start)
260                         p = &(*p)->rb_right;
261                 else
262                         return eb;
263         }
264
265         rb_link_node(node, parent, p);
266         rb_insert_color(node, root);
267         return NULL;
268 }
269
270 static struct extent_buffer *buffer_search(struct extent_io_tree *tree,
271                                            u64 offset)
272 {
273         struct rb_root *root = &tree->buffer;
274         struct rb_node *n = root->rb_node;
275         struct extent_buffer *eb;
276
277         while (n) {
278                 eb = rb_entry(n, struct extent_buffer, rb_node);
279                 if (offset < eb->start)
280                         n = n->rb_left;
281                 else if (offset > eb->start)
282                         n = n->rb_right;
283                 else
284                         return eb;
285         }
286         return NULL;
287 }
288
289 /*
290  * utility function to look for merge candidates inside a given range.
291  * Any extents with matching state are merged together into a single
292  * extent in the tree.  Extents with EXTENT_IOBITS in their state field
293  * are not merged because the end_io handlers need to be able to do
294  * operations on them without sleeping (or doing allocations/splits).
295  *
296  * This should be called with the tree lock held.
297  */
298 static int merge_state(struct extent_io_tree *tree,
299                        struct extent_state *state)
300 {
301         struct extent_state *other;
302         struct rb_node *other_node;
303
304         if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
305                 return 0;
306
307         other_node = rb_prev(&state->rb_node);
308         if (other_node) {
309                 other = rb_entry(other_node, struct extent_state, rb_node);
310                 if (other->end == state->start - 1 &&
311                     other->state == state->state) {
312                         state->start = other->start;
313                         other->tree = NULL;
314                         rb_erase(&other->rb_node, &tree->state);
315                         free_extent_state(other);
316                 }
317         }
318         other_node = rb_next(&state->rb_node);
319         if (other_node) {
320                 other = rb_entry(other_node, struct extent_state, rb_node);
321                 if (other->start == state->end + 1 &&
322                     other->state == state->state) {
323                         other->start = state->start;
324                         state->tree = NULL;
325                         rb_erase(&state->rb_node, &tree->state);
326                         free_extent_state(state);
327                 }
328         }
329         return 0;
330 }
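/*
 * Editorial note (not part of the original source): a quick example of the
 * merge above.  If [0,4095] and [4096,8191] both carry exactly the state
 * EXTENT_DIRTY, merge_state() collapses them into a single [0,8191] state
 * and frees the leftover struct.  If either one is locked or under
 * writeback (EXTENT_IOBITS) or marked EXTENT_BOUNDARY, or if their state
 * bits differ at all, they stay as two separate records.
 */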
331
332 static void set_state_cb(struct extent_io_tree *tree,
333                          struct extent_state *state,
334                          unsigned long bits)
335 {
336         if (tree->ops && tree->ops->set_bit_hook) {
337                 tree->ops->set_bit_hook(tree->mapping->host, state->start,
338                                         state->end, state->state, bits);
339         }
340 }
341
342 static void clear_state_cb(struct extent_io_tree *tree,
343                            struct extent_state *state,
344                            unsigned long bits)
345 {
346         if (tree->ops && tree->ops->clear_bit_hook) {
347                 tree->ops->clear_bit_hook(tree->mapping->host, state->start,
348                                           state->end, state->state, bits);
349         }
350 }
351
352 /*
353  * insert an extent_state struct into the tree.  'bits' are set on the
354  * struct before it is inserted.
355  *
356  * This may return -EEXIST if the extent is already there, in which case the
357  * state struct is freed.
358  *
359  * The tree lock is not taken internally.  This is a utility function and
360  * probably isn't what you want to call (see set/clear_extent_bit).
361  */
362 static int insert_state(struct extent_io_tree *tree,
363                         struct extent_state *state, u64 start, u64 end,
364                         int bits)
365 {
366         struct rb_node *node;
367
368         if (end < start) {
369                 printk(KERN_ERR "btrfs end < start %llu %llu\n",
370                        (unsigned long long)end,
371                        (unsigned long long)start);
372                 WARN_ON(1);
373         }
374         if (bits & EXTENT_DIRTY)
375                 tree->dirty_bytes += end - start + 1;
376         set_state_cb(tree, state, bits);
377         state->state |= bits;
378         state->start = start;
379         state->end = end;
380         node = tree_insert(&tree->state, end, &state->rb_node);
381         if (node) {
382                 struct extent_state *found;
383                 found = rb_entry(node, struct extent_state, rb_node);
384                 printk(KERN_ERR "btrfs found node %llu %llu on insert of "
385                        "%llu %llu\n", (unsigned long long)found->start,
386                        (unsigned long long)found->end,
387                        (unsigned long long)start, (unsigned long long)end);
388                 free_extent_state(state);
389                 return -EEXIST;
390         }
391         state->tree = tree;
392         merge_state(tree, state);
393         return 0;
394 }
395
396 /*
397  * split a given extent state struct in two, inserting the preallocated
398  * struct 'prealloc' as the newly created second half.  'split' indicates an
399  * offset inside 'orig' where it should be split.
400  *
401  * Before calling,
402  * the tree has 'orig' at [orig->start, orig->end].  After calling, there
403  * are two extent state structs in the tree:
404  * prealloc: [orig->start, split - 1]
405  * orig: [ split, orig->end ]
406  *
407  * The tree locks are not taken by this function. They need to be held
408  * by the caller.
409  */
410 static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
411                        struct extent_state *prealloc, u64 split)
412 {
413         struct rb_node *node;
414         prealloc->start = orig->start;
415         prealloc->end = split - 1;
416         prealloc->state = orig->state;
417         orig->start = split;
418
419         node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
420         if (node) {
421                 free_extent_state(prealloc);
422                 return -EEXIST;
423         }
424         prealloc->tree = tree;
425         return 0;
426 }
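/*
 * Editorial note (not part of the original source): concrete example of the
 * split described above.  With 'orig' covering [0,8191] and split == 4096,
 * after split_state() the tree holds:
 *
 *   prealloc: [0,4095]     (inherits orig's state bits)
 *   orig:     [4096,8191]
 *
 * The caller must have allocated 'prealloc' beforehand and must hold the
 * tree lock, as noted in the comment above.
 */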
427
428 /*
429  * utility function to clear some bits in an extent state struct.
430  * it will optionally wake up anyone waiting on this state (wake == 1), or
431  * forcibly remove the state from the tree (delete == 1).
432  *
433  * If no bits are set on the state struct after clearing things, the
434  * struct is freed and removed from the tree
435  */
436 static int clear_state_bit(struct extent_io_tree *tree,
437                             struct extent_state *state, int bits, int wake,
438                             int delete)
439 {
440         int ret = state->state & bits;
441
442         if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
443                 u64 range = state->end - state->start + 1;
444                 WARN_ON(range > tree->dirty_bytes);
445                 tree->dirty_bytes -= range;
446         }
447         clear_state_cb(tree, state, bits);
448         state->state &= ~bits;
449         if (wake)
450                 wake_up(&state->wq);
451         if (delete || state->state == 0) {
452                 if (state->tree) {
453                         clear_state_cb(tree, state, state->state);
454                         rb_erase(&state->rb_node, &tree->state);
455                         state->tree = NULL;
456                         free_extent_state(state);
457                 } else {
458                         WARN_ON(1);
459                 }
460         } else {
461                 merge_state(tree, state);
462         }
463         return ret;
464 }
465
466 /*
467  * clear some bits on a range in the tree.  This may require splitting
468  * or inserting elements in the tree, so the gfp mask is used to
469  * indicate which allocations or sleeping are allowed.
470  *
471  * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
472  * the given range from the tree regardless of state (ie for truncate).
473  *
474  * the range [start, end] is inclusive.
475  *
476  * This takes the tree lock, and returns < 0 on error, > 0 if any of the
477  * bits were already set, or zero if none of the bits were already set.
478  */
479 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
480                      int bits, int wake, int delete, gfp_t mask)
481 {
482         struct extent_state *state;
483         struct extent_state *prealloc = NULL;
484         struct rb_node *node;
485         int err;
486         int set = 0;
487
488 again:
489         if (!prealloc && (mask & __GFP_WAIT)) {
490                 prealloc = alloc_extent_state(mask);
491                 if (!prealloc)
492                         return -ENOMEM;
493         }
494
495         spin_lock(&tree->lock);
496         /*
497          * this search will find the extents that end after
498          * our range starts
499          */
500         node = tree_search(tree, start);
501         if (!node)
502                 goto out;
503         state = rb_entry(node, struct extent_state, rb_node);
504         if (state->start > end)
505                 goto out;
506         WARN_ON(state->end < start);
507
508         /*
509          *     | ---- desired range ---- |
510          *  | state | or
511          *  | ------------- state -------------- |
512          *
513          * We need to split the extent we found, and may flip
514          * bits on second half.
515          *
516          * If the extent we found extends past our range, we
517          * just split and search again.  It'll get split again
518          * the next time though.
519          *
520          * If the extent we found is inside our range, we clear
521          * the desired bit on it.
522          */
523
524         if (state->start < start) {
525                 if (!prealloc)
526                         prealloc = alloc_extent_state(GFP_ATOMIC);
527                 err = split_state(tree, state, prealloc, start);
528                 BUG_ON(err == -EEXIST);
529                 prealloc = NULL;
530                 if (err)
531                         goto out;
532                 if (state->end <= end) {
533                         start = state->end + 1;
534                         set |= clear_state_bit(tree, state, bits,
535                                         wake, delete);
536                 } else {
537                         start = state->start;
538                 }
539                 goto search_again;
540         }
541         /*
542          * | ---- desired range ---- |
543          *                        | state |
544          * We need to split the extent, and clear the bit
545          * on the first half
546          */
547         if (state->start <= end && state->end > end) {
548                 if (!prealloc)
549                         prealloc = alloc_extent_state(GFP_ATOMIC);
550                 err = split_state(tree, state, prealloc, end + 1);
551                 BUG_ON(err == -EEXIST);
552
553                 if (wake)
554                         wake_up(&state->wq);
555                 set |= clear_state_bit(tree, prealloc, bits,
556                                        wake, delete);
557                 prealloc = NULL;
558                 goto out;
559         }
560
561         start = state->end + 1;
562         set |= clear_state_bit(tree, state, bits, wake, delete);
563         goto search_again;
564
565 out:
566         spin_unlock(&tree->lock);
567         if (prealloc)
568                 free_extent_state(prealloc);
569
570         return set;
571
572 search_again:
573         if (start > end)
574                 goto out;
575         spin_unlock(&tree->lock);
576         if (mask & __GFP_WAIT)
577                 cond_resched();
578         goto again;
579 }
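/*
 * Editorial note (not part of the original source): the net effect of
 * clear_extent_bit() is that only the bytes inside [start, end] lose the
 * requested bits.  For example, clearing EXTENT_DIRTY over [4096, 8191]
 * when a single dirty state covers [0, 12287] first splits off [0, 4095]
 * and [8192, 12287] (which stay dirty) and then clears the bit on the
 * middle piece; the return value is > 0 because the bit was set somewhere
 * in the range.
 */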
580
581 static int wait_on_state(struct extent_io_tree *tree,
582                          struct extent_state *state)
583                 __releases(tree->lock)
584                 __acquires(tree->lock)
585 {
586         DEFINE_WAIT(wait);
587         prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
588         spin_unlock(&tree->lock);
589         schedule();
590         spin_lock(&tree->lock);
591         finish_wait(&state->wq, &wait);
592         return 0;
593 }
594
595 /*
596  * waits for one or more bits to clear on a range in the state tree.
597  * The range [start, end] is inclusive.
598  * The tree lock is taken by this function
599  */
600 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
601 {
602         struct extent_state *state;
603         struct rb_node *node;
604
605         spin_lock(&tree->lock);
606 again:
607         while (1) {
608                 /*
609                  * this search will find all the extents that end after
610                  * our range starts
611                  */
612                 node = tree_search(tree, start);
613                 if (!node)
614                         break;
615
616                 state = rb_entry(node, struct extent_state, rb_node);
617
618                 if (state->start > end)
619                         goto out;
620
621                 if (state->state & bits) {
622                         start = state->start;
623                         atomic_inc(&state->refs);
624                         wait_on_state(tree, state);
625                         free_extent_state(state);
626                         goto again;
627                 }
628                 start = state->end + 1;
629
630                 if (start > end)
631                         break;
632
633                 if (need_resched()) {
634                         spin_unlock(&tree->lock);
635                         cond_resched();
636                         spin_lock(&tree->lock);
637                 }
638         }
639 out:
640         spin_unlock(&tree->lock);
641         return 0;
642 }
643
644 static void set_state_bits(struct extent_io_tree *tree,
645                            struct extent_state *state,
646                            int bits)
647 {
648         if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
649                 u64 range = state->end - state->start + 1;
650                 tree->dirty_bytes += range;
651         }
652         set_state_cb(tree, state, bits);
653         state->state |= bits;
654 }
655
656 /*
657  * set some bits on a range in the tree.  This may require allocations
658  * or sleeping, so the gfp mask is used to indicate what is allowed.
659  *
660  * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
661  * range already has the desired bits set.  The start of the existing
662  * range is returned in failed_start in this case.
663  *
664  * [start, end] is inclusive
665  * This takes the tree lock.
666  */
667 static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
668                           int bits, int exclusive, u64 *failed_start,
669                           gfp_t mask)
670 {
671         struct extent_state *state;
672         struct extent_state *prealloc = NULL;
673         struct rb_node *node;
674         int err = 0;
675         int set;
676         u64 last_start;
677         u64 last_end;
678 again:
679         if (!prealloc && (mask & __GFP_WAIT)) {
680                 prealloc = alloc_extent_state(mask);
681                 if (!prealloc)
682                         return -ENOMEM;
683         }
684
685         spin_lock(&tree->lock);
686         /*
687          * this search will find all the extents that end after
688          * our range starts.
689          */
690         node = tree_search(tree, start);
691         if (!node) {
692                 err = insert_state(tree, prealloc, start, end, bits);
693                 prealloc = NULL;
694                 BUG_ON(err == -EEXIST);
695                 goto out;
696         }
697
698         state = rb_entry(node, struct extent_state, rb_node);
699         last_start = state->start;
700         last_end = state->end;
701
702         /*
703          * | ---- desired range ---- |
704          * | state |
705          *
706          * Just lock what we found and keep going
707          */
708         if (state->start == start && state->end <= end) {
709                 set = state->state & bits;
710                 if (set && exclusive) {
711                         *failed_start = state->start;
712                         err = -EEXIST;
713                         goto out;
714                 }
715                 set_state_bits(tree, state, bits);
716                 start = state->end + 1;
717                 merge_state(tree, state);
718                 goto search_again;
719         }
720
721         /*
722          *     | ---- desired range ---- |
723          * | state |
724          *   or
725          * | ------------- state -------------- |
726          *
727          * We need to split the extent we found, and may flip bits on
728          * second half.
729          *
730          * If the extent we found extends past our
731          * range, we just split and search again.  It'll get split
732          * again the next time though.
733          *
734          * If the extent we found is inside our range, we set the
735          * desired bit on it.
736          */
737         if (state->start < start) {
738                 set = state->state & bits;
739                 if (exclusive && set) {
740                         *failed_start = start;
741                         err = -EEXIST;
742                         goto out;
743                 }
744                 err = split_state(tree, state, prealloc, start);
745                 BUG_ON(err == -EEXIST);
746                 prealloc = NULL;
747                 if (err)
748                         goto out;
749                 if (state->end <= end) {
750                         set_state_bits(tree, state, bits);
751                         start = state->end + 1;
752                         merge_state(tree, state);
753                 } else {
754                         start = state->start;
755                 }
756                 goto search_again;
757         }
758         /*
759          * | ---- desired range ---- |
760          *     | state | or               | state |
761          *
762          * There's a hole, we need to insert something in it and
763          * ignore the extent we found.
764          */
765         if (state->start > start) {
766                 u64 this_end;
767                 if (end < last_start)
768                         this_end = end;
769                 else
770                         this_end = last_start - 1;
771                 err = insert_state(tree, prealloc, start, this_end,
772                                    bits);
773                 prealloc = NULL;
774                 BUG_ON(err == -EEXIST);
775                 if (err)
776                         goto out;
777                 start = this_end + 1;
778                 goto search_again;
779         }
780         /*
781          * | ---- desired range ---- |
782          *                        | state |
783          * We need to split the extent, and set the bit
784          * on the first half
785          */
786         if (state->start <= end && state->end > end) {
787                 set = state->state & bits;
788                 if (exclusive && set) {
789                         *failed_start = start;
790                         err = -EEXIST;
791                         goto out;
792                 }
793                 err = split_state(tree, state, prealloc, end + 1);
794                 BUG_ON(err == -EEXIST);
795
796                 set_state_bits(tree, prealloc, bits);
797                 merge_state(tree, prealloc);
798                 prealloc = NULL;
799                 goto out;
800         }
801
802         goto search_again;
803
804 out:
805         spin_unlock(&tree->lock);
806         if (prealloc)
807                 free_extent_state(prealloc);
808
809         return err;
810
811 search_again:
812         if (start > end)
813                 goto out;
814         spin_unlock(&tree->lock);
815         if (mask & __GFP_WAIT)
816                 cond_resched();
817         goto again;
818 }
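/*
 * Editorial note (not part of the original source): set_extent_bit() walks
 * the same four cases as clear_extent_bit() (exact fit, left overlap, hole,
 * right overlap), splitting or inserting states as needed so that exactly
 * [start, end] gains the new bits.  With 'exclusive' == 1 the call fails
 * with -EEXIST as soon as any part of the range already has one of the
 * requested bits, and *failed_start reports where the collision begins;
 * lock_extent() below relies on that to wait and retry.
 */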
819
820 /* wrappers around set/clear extent bit */
821 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
822                      gfp_t mask)
823 {
824         return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
825                               mask);
826 }
827
828 int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
829                        gfp_t mask)
830 {
831         return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, mask);
832 }
833
834 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
835                     int bits, gfp_t mask)
836 {
837         return set_extent_bit(tree, start, end, bits, 0, NULL,
838                               mask);
839 }
840
841 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
842                       int bits, gfp_t mask)
843 {
844         return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
845 }
846
847 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
848                      gfp_t mask)
849 {
850         return set_extent_bit(tree, start, end,
851                               EXTENT_DELALLOC | EXTENT_DIRTY,
852                               0, NULL, mask);
853 }
854
855 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
856                        gfp_t mask)
857 {
858         return clear_extent_bit(tree, start, end,
859                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
860 }
861
862 int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
863                          gfp_t mask)
864 {
865         return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
866 }
867
868 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
869                      gfp_t mask)
870 {
871         return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
872                               mask);
873 }
874
875 static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
876                        gfp_t mask)
877 {
878         return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
879 }
880
881 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
882                         gfp_t mask)
883 {
884         return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
885                               mask);
886 }
887
888 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
889                                  u64 end, gfp_t mask)
890 {
891         return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
892 }
893
894 static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
895                          gfp_t mask)
896 {
897         return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
898                               0, NULL, mask);
899 }
900
901 static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
902                                   u64 end, gfp_t mask)
903 {
904         return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
905 }
906
907 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
908 {
909         return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
910 }
911
912 /*
913  * either insert or lock the state struct between start and end.  Use mask
914  * to tell us if waiting is desired.
915  */
916 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
917 {
918         int err;
919         u64 failed_start;
920         while (1) {
921                 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
922                                      &failed_start, mask);
923                 if (err == -EEXIST && (mask & __GFP_WAIT)) {
924                         wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
925                         start = failed_start;
926                 } else {
927                         break;
928                 }
929                 WARN_ON(start > end);
930         }
931         return err;
932 }
933
934 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
935                     gfp_t mask)
936 {
937         int err;
938         u64 failed_start;
939
940         err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
941                              &failed_start, mask);
942         if (err == -EEXIST) {
943                 if (failed_start > start)
944                         clear_extent_bit(tree, start, failed_start - 1,
945                                          EXTENT_LOCKED, 1, 0, mask);
946                 return 0;
947         }
948         return 1;
949 }
950
951 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
952                   gfp_t mask)
953 {
954         return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
955 }
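#if 0
/*
 * Editorial sketch (not part of the original source): a minimal example of
 * how the lock/test/unlock helpers above are meant to be combined, modeled
 * on the sequence used by find_lock_delalloc_range() later in this file.
 * The 'tree', 'start' and 'end' parameters are assumed to come from the
 * caller.
 */
static void example_locked_range_check(struct extent_io_tree *tree,
				       u64 start, u64 end)
{
	/* insert or lock the extent states covering [start, end] */
	lock_extent(tree, start, end, GFP_NOFS);

	/* require the whole range to still be delalloc before using it */
	if (!test_range_bit(tree, start, end, EXTENT_DELALLOC, 1)) {
		unlock_extent(tree, start, end, GFP_NOFS);
		return;
	}

	/* ... operate on the locked range here ... */

	unlock_extent(tree, start, end, GFP_NOFS);
}
#endif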
956
957 /*
958  * helper function to set pages and extents in the tree dirty
959  */
960 int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
961 {
962         unsigned long index = start >> PAGE_CACHE_SHIFT;
963         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
964         struct page *page;
965
966         while (index <= end_index) {
967                 page = find_get_page(tree->mapping, index);
968                 BUG_ON(!page);
969                 __set_page_dirty_nobuffers(page);
970                 page_cache_release(page);
971                 index++;
972         }
973         set_extent_dirty(tree, start, end, GFP_NOFS);
974         return 0;
975 }
976
977 /*
978  * helper function to set both pages and extents in the tree writeback
979  */
980 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
981 {
982         unsigned long index = start >> PAGE_CACHE_SHIFT;
983         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
984         struct page *page;
985
986         while (index <= end_index) {
987                 page = find_get_page(tree->mapping, index);
988                 BUG_ON(!page);
989                 set_page_writeback(page);
990                 page_cache_release(page);
991                 index++;
992         }
993         set_extent_writeback(tree, start, end, GFP_NOFS);
994         return 0;
995 }
996
997 /*
998  * find the first offset in the io tree with 'bits' set. zero is
999  * returned if we find something, and *start_ret and *end_ret are
1000  * set to reflect the state struct that was found.
1001  *
1002  * If nothing was found, 1 is returned, < 0 on error
1003  */
1004 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1005                           u64 *start_ret, u64 *end_ret, int bits)
1006 {
1007         struct rb_node *node;
1008         struct extent_state *state;
1009         int ret = 1;
1010
1011         spin_lock(&tree->lock);
1012         /*
1013          * this search will find all the extents that end after
1014          * our range starts.
1015          */
1016         node = tree_search(tree, start);
1017         if (!node)
1018                 goto out;
1019
1020         while (1) {
1021                 state = rb_entry(node, struct extent_state, rb_node);
1022                 if (state->end >= start && (state->state & bits)) {
1023                         *start_ret = state->start;
1024                         *end_ret = state->end;
1025                         ret = 0;
1026                         break;
1027                 }
1028                 node = rb_next(node);
1029                 if (!node)
1030                         break;
1031         }
1032 out:
1033         spin_unlock(&tree->lock);
1034         return ret;
1035 }
1036
1037 /* find the first state struct with 'bits' set after 'start', and
1038  * return it.  tree->lock must be held.  NULL will be returned if
1039  * nothing was found after 'start'
1040  */
1041 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
1042                                                  u64 start, int bits)
1043 {
1044         struct rb_node *node;
1045         struct extent_state *state;
1046
1047         /*
1048          * this search will find all the extents that end after
1049          * our range starts.
1050          */
1051         node = tree_search(tree, start);
1052         if (!node)
1053                 goto out;
1054
1055         while (1) {
1056                 state = rb_entry(node, struct extent_state, rb_node);
1057                 if (state->end >= start && (state->state & bits))
1058                         return state;
1059
1060                 node = rb_next(node);
1061                 if (!node)
1062                         break;
1063         }
1064 out:
1065         return NULL;
1066 }
1067
1068 /*
1069  * find a contiguous range of bytes in the file marked as delalloc, not
1070  * more than 'max_bytes'.  'start' and 'end' are used to return the range.
1071  *
1072  * 1 is returned if we find something, 0 if nothing was in the tree
1073  */
1074 static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
1075                                         u64 *start, u64 *end, u64 max_bytes)
1076 {
1077         struct rb_node *node;
1078         struct extent_state *state;
1079         u64 cur_start = *start;
1080         u64 found = 0;
1081         u64 total_bytes = 0;
1082
1083         spin_lock(&tree->lock);
1084
1085         /*
1086          * this search will find all the extents that end after
1087          * our range starts.
1088          */
1089         node = tree_search(tree, cur_start);
1090         if (!node) {
1091                 if (!found)
1092                         *end = (u64)-1;
1093                 goto out;
1094         }
1095
1096         while (1) {
1097                 state = rb_entry(node, struct extent_state, rb_node);
1098                 if (found && (state->start != cur_start ||
1099                               (state->state & EXTENT_BOUNDARY))) {
1100                         goto out;
1101                 }
1102                 if (!(state->state & EXTENT_DELALLOC)) {
1103                         if (!found)
1104                                 *end = state->end;
1105                         goto out;
1106                 }
1107                 if (!found)
1108                         *start = state->start;
1109                 found++;
1110                 *end = state->end;
1111                 cur_start = state->end + 1;
1112                 node = rb_next(node);
1113                 if (!node)
1114                         break;
1115                 total_bytes += state->end - state->start + 1;
1116                 if (total_bytes >= max_bytes)
1117                         break;
1118         }
1119 out:
1120         spin_unlock(&tree->lock);
1121         return found;
1122 }
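/*
 * Editorial note (not part of the original source): find_delalloc_range()
 * extends the reported range one state at a time and stops at the first
 * gap, at a state without EXTENT_DELALLOC, at an EXTENT_BOUNDARY state, or
 * once roughly max_bytes have been accumulated.  For instance, with
 * delalloc states [0,4095] and [4096,8191] and *start == 0, it returns a
 * nonzero count with *start == 0 and *end == 8191.
 */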
1123
1124 static noinline int __unlock_for_delalloc(struct inode *inode,
1125                                           struct page *locked_page,
1126                                           u64 start, u64 end)
1127 {
1128         int ret;
1129         struct page *pages[16];
1130         unsigned long index = start >> PAGE_CACHE_SHIFT;
1131         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1132         unsigned long nr_pages = end_index - index + 1;
1133         int i;
1134
1135         if (index == locked_page->index && end_index == index)
1136                 return 0;
1137
1138         while (nr_pages > 0) {
1139                 ret = find_get_pages_contig(inode->i_mapping, index,
1140                                      min_t(unsigned long, nr_pages,
1141                                      ARRAY_SIZE(pages)), pages);
1142                 for (i = 0; i < ret; i++) {
1143                         if (pages[i] != locked_page)
1144                                 unlock_page(pages[i]);
1145                         page_cache_release(pages[i]);
1146                 }
1147                 nr_pages -= ret;
1148                 index += ret;
1149                 cond_resched();
1150         }
1151         return 0;
1152 }
1153
1154 static noinline int lock_delalloc_pages(struct inode *inode,
1155                                         struct page *locked_page,
1156                                         u64 delalloc_start,
1157                                         u64 delalloc_end)
1158 {
1159         unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
1160         unsigned long start_index = index;
1161         unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
1162         unsigned long pages_locked = 0;
1163         struct page *pages[16];
1164         unsigned long nrpages;
1165         int ret;
1166         int i;
1167
1168         /* the caller is responsible for locking the start index */
1169         if (index == locked_page->index && index == end_index)
1170                 return 0;
1171
1172         /* skip the page at the start index */
1173         nrpages = end_index - index + 1;
1174         while (nrpages > 0) {
1175                 ret = find_get_pages_contig(inode->i_mapping, index,
1176                                      min_t(unsigned long,
1177                                      nrpages, ARRAY_SIZE(pages)), pages);
1178                 if (ret == 0) {
1179                         ret = -EAGAIN;
1180                         goto done;
1181                 }
1182                 /* now we have an array of pages, lock them all */
1183                 for (i = 0; i < ret; i++) {
1184                         /*
1185                          * the caller is taking responsibility for
1186                          * locked_page
1187                          */
1188                         if (pages[i] != locked_page) {
1189                                 lock_page(pages[i]);
1190                                 if (!PageDirty(pages[i]) ||
1191                                     pages[i]->mapping != inode->i_mapping) {
1192                                         ret = -EAGAIN;
1193                                         unlock_page(pages[i]);
1194                                         page_cache_release(pages[i]);
1195                                         goto done;
1196                                 }
1197                         }
1198                         page_cache_release(pages[i]);
1199                         pages_locked++;
1200                 }
1201                 nrpages -= ret;
1202                 index += ret;
1203                 cond_resched();
1204         }
1205         ret = 0;
1206 done:
1207         if (ret && pages_locked) {
1208                 __unlock_for_delalloc(inode, locked_page,
1209                               delalloc_start,
1210                               ((u64)(start_index + pages_locked - 1)) <<
1211                               PAGE_CACHE_SHIFT);
1212         }
1213         return ret;
1214 }
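/*
 * Editorial note (not part of the original source): lock_delalloc_pages()
 * returns -EAGAIN when a page in the range has gone missing, has been
 * truncated away from this mapping, or is no longer dirty; any pages it did
 * manage to lock are unlocked again via __unlock_for_delalloc() before
 * returning.  find_lock_delalloc_range() below reacts to -EAGAIN by
 * shrinking the search to at most one page worth of bytes and retrying
 * once.
 */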
1215
1216 /*
1217  * find a contiguous range of bytes in the file marked as delalloc, not
1218  * more than 'max_bytes'.  'start' and 'end' are used to return the range.
1219  *
1220  * 1 is returned if we find something, 0 if nothing was in the tree
1221  */
1222 static noinline u64 find_lock_delalloc_range(struct inode *inode,
1223                                              struct extent_io_tree *tree,
1224                                              struct page *locked_page,
1225                                              u64 *start, u64 *end,
1226                                              u64 max_bytes)
1227 {
1228         u64 delalloc_start;
1229         u64 delalloc_end;
1230         u64 found;
1231         int ret;
1232         int loops = 0;
1233
1234 again:
1235         /* step one, find a bunch of delalloc bytes starting at start */
1236         delalloc_start = *start;
1237         delalloc_end = 0;
1238         found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
1239                                     max_bytes);
1240         if (!found || delalloc_end <= *start) {
1241                 *start = delalloc_start;
1242                 *end = delalloc_end;
1243                 return found;
1244         }
1245
1246         /*
1247          * start comes from the offset of locked_page.  We have to lock
1248          * pages in order, so we can't process delalloc bytes before
1249          * locked_page
1250          */
1251         if (delalloc_start < *start)
1252                 delalloc_start = *start;
1253
1254         /*
1255          * make sure to limit the number of pages we try to lock down
1256          * if we're looping.
1257          */
1258         if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
1259                 delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
1260
1261         /* step two, lock all the pages after the page that has start */
1262         ret = lock_delalloc_pages(inode, locked_page,
1263                                   delalloc_start, delalloc_end);
1264         if (ret == -EAGAIN) {
1265                 /* some of the pages are gone, lets avoid looping by
1266                  * shortening the size of the delalloc range we're searching
1267                  */
1268                 if (!loops) {
1269                         unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
1270                         max_bytes = PAGE_CACHE_SIZE - offset;
1271                         loops = 1;
1272                         goto again;
1273                 } else {
1274                         found = 0;
1275                         goto out_failed;
1276                 }
1277         }
1278         BUG_ON(ret);
1279
1280         /* step three, lock the state bits for the whole range */
1281         lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1282
1283         /* then test to make sure it is all still delalloc */
1284         ret = test_range_bit(tree, delalloc_start, delalloc_end,
1285                              EXTENT_DELALLOC, 1);
1286         if (!ret) {
1287                 unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
1288                 __unlock_for_delalloc(inode, locked_page,
1289                               delalloc_start, delalloc_end);
1290                 cond_resched();
1291                 goto again;
1292         }
1293         *start = delalloc_start;
1294         *end = delalloc_end;
1295 out_failed:
1296         return found;
1297 }
1298
1299 int extent_clear_unlock_delalloc(struct inode *inode,
1300                                 struct extent_io_tree *tree,
1301                                 u64 start, u64 end, struct page *locked_page,
1302                                 int unlock_pages,
1303                                 int clear_unlock,
1304                                 int clear_delalloc, int clear_dirty,
1305                                 int set_writeback,
1306                                 int end_writeback)
1307 {
1308         int ret;
1309         struct page *pages[16];
1310         unsigned long index = start >> PAGE_CACHE_SHIFT;
1311         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1312         unsigned long nr_pages = end_index - index + 1;
1313         int i;
1314         int clear_bits = 0;
1315
1316         if (clear_unlock)
1317                 clear_bits |= EXTENT_LOCKED;
1318         if (clear_dirty)
1319                 clear_bits |= EXTENT_DIRTY;
1320
1321         if (clear_delalloc)
1322                 clear_bits |= EXTENT_DELALLOC;
1323
1324         clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
1325         if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
1326                 return 0;
1327
1328         while (nr_pages > 0) {
1329                 ret = find_get_pages_contig(inode->i_mapping, index,
1330                                      min_t(unsigned long,
1331                                      nr_pages, ARRAY_SIZE(pages)), pages);
1332                 for (i = 0; i < ret; i++) {
1333                         if (pages[i] == locked_page) {
1334                                 page_cache_release(pages[i]);
1335                                 continue;
1336                         }
1337                         if (clear_dirty)
1338                                 clear_page_dirty_for_io(pages[i]);
1339                         if (set_writeback)
1340                                 set_page_writeback(pages[i]);
1341                         if (end_writeback)
1342                                 end_page_writeback(pages[i]);
1343                         if (unlock_pages)
1344                                 unlock_page(pages[i]);
1345                         page_cache_release(pages[i]);
1346                 }
1347                 nr_pages -= ret;
1348                 index += ret;
1349                 cond_resched();
1350         }
1351         return 0;
1352 }
1353
1354 /*
1355  * count the number of bytes in the tree that have a given bit(s)
1356  * set.  This can be fairly slow, except for EXTENT_DIRTY which is
1357  * cached.  The total number found is returned.
1358  */
1359 u64 count_range_bits(struct extent_io_tree *tree,
1360                      u64 *start, u64 search_end, u64 max_bytes,
1361                      unsigned long bits)
1362 {
1363         struct rb_node *node;
1364         struct extent_state *state;
1365         u64 cur_start = *start;
1366         u64 total_bytes = 0;
1367         int found = 0;
1368
1369         if (search_end <= cur_start) {
1370                 WARN_ON(1);
1371                 return 0;
1372         }
1373
1374         spin_lock(&tree->lock);
1375         if (cur_start == 0 && bits == EXTENT_DIRTY) {
1376                 total_bytes = tree->dirty_bytes;
1377                 goto out;
1378         }
1379         /*
1380          * this search will find all the extents that end after
1381          * our range starts.
1382          */
1383         node = tree_search(tree, cur_start);
1384         if (!node)
1385                 goto out;
1386
1387         while (1) {
1388                 state = rb_entry(node, struct extent_state, rb_node);
1389                 if (state->start > search_end)
1390                         break;
1391                 if (state->end >= cur_start && (state->state & bits)) {
1392                         total_bytes += min(search_end, state->end) + 1 -
1393                                        max(cur_start, state->start);
1394                         if (total_bytes >= max_bytes)
1395                                 break;
1396                         if (!found) {
1397                                 *start = state->start;
1398                                 found = 1;
1399                         }
1400                 }
1401                 node = rb_next(node);
1402                 if (!node)
1403                         break;
1404         }
1405 out:
1406         spin_unlock(&tree->lock);
1407         return total_bytes;
1408 }
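/*
 * Editorial note (not part of the original source): the byte count above is
 * the overlap between each matching state and the search window,
 * min(search_end, state->end) + 1 - max(cur_start, state->start).  As a
 * worked example, a state covering [0,8191] queried with cur_start == 4096
 * and search_end == 12287 contributes 8191 + 1 - 4096 = 4096 bytes.
 */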
1409
1410 #if 0
1411 /*
1412  * helper function to lock both pages and extents in the tree.
1413  * pages must be locked first.
1414  */
1415 static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
1416 {
1417         unsigned long index = start >> PAGE_CACHE_SHIFT;
1418         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1419         struct page *page;
1420         int err;
1421
1422         while (index <= end_index) {
1423                 page = grab_cache_page(tree->mapping, index);
1424                 if (!page) {
1425                         err = -ENOMEM;
1426                         goto failed;
1427                 }
1428                 if (IS_ERR(page)) {
1429                         err = PTR_ERR(page);
1430                         goto failed;
1431                 }
1432                 index++;
1433         }
1434         lock_extent(tree, start, end, GFP_NOFS);
1435         return 0;
1436
1437 failed:
1438         /*
1439          * we failed above in getting the page at 'index', so we undo here
1440          * up to but not including the page at 'index'
1441          */
1442         end_index = index;
1443         index = start >> PAGE_CACHE_SHIFT;
1444         while (index < end_index) {
1445                 page = find_get_page(tree->mapping, index);
1446                 unlock_page(page);
1447                 page_cache_release(page);
1448                 index++;
1449         }
1450         return err;
1451 }
1452
1453 /*
1454  * helper function to unlock both pages and extents in the tree.
1455  */
1456 static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
1457 {
1458         unsigned long index = start >> PAGE_CACHE_SHIFT;
1459         unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1460         struct page *page;
1461
1462         while (index <= end_index) {
1463                 page = find_get_page(tree->mapping, index);
1464                 unlock_page(page);
1465                 page_cache_release(page);
1466                 index++;
1467         }
1468         unlock_extent(tree, start, end, GFP_NOFS);
1469         return 0;
1470 }
1471 #endif
1472
1473 /*
1474  * set the private field for a given byte offset in the tree.  If there isn't
1475  * an extent_state there already, this does nothing.
1476  */
1477 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
1478 {
1479         struct rb_node *node;
1480         struct extent_state *state;
1481         int ret = 0;
1482
1483         spin_lock(&tree->lock);
1484         /*
1485          * this search will find all the extents that end after
1486          * our range starts.
1487          */
1488         node = tree_search(tree, start);
1489         if (!node) {
1490                 ret = -ENOENT;
1491                 goto out;
1492         }
1493         state = rb_entry(node, struct extent_state, rb_node);
1494         if (state->start != start) {
1495                 ret = -ENOENT;
1496                 goto out;
1497         }
1498         state->private = private;
1499 out:
1500         spin_unlock(&tree->lock);
1501         return ret;
1502 }
1503
1504 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
1505 {
1506         struct rb_node *node;
1507         struct extent_state *state;
1508         int ret = 0;
1509
1510         spin_lock(&tree->lock);
1511         /*
1512          * this search will find all the extents that end after
1513          * our range starts.
1514          */
1515         node = tree_search(tree, start);
1516         if (!node) {
1517                 ret = -ENOENT;
1518                 goto out;
1519         }
1520         state = rb_entry(node, struct extent_state, rb_node);
1521         if (state->start != start) {
1522                 ret = -ENOENT;
1523                 goto out;
1524         }
1525         *private = state->private;
1526 out:
1527         spin_unlock(&tree->lock);
1528         return ret;
1529 }
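/*
 * Editor's usage sketch, not part of the original file: btrfs stores a
 * per-range cookie (for example a checksum looked up at submit time) with
 * set_state_private() and reads it back from the end_io path with
 * get_state_private().  The helper name and the readback check here are
 * illustrative only; it assumes an extent_state already begins at 'start'.
 */
static inline int example_stash_and_read_private(struct extent_io_tree *tree,
                                                 u64 start, u64 cookie)
{
        u64 readback = 0;
        int ret;

        ret = set_state_private(tree, start, cookie);
        if (ret)
                return ret;     /* -ENOENT: no extent_state starts at 'start' */
        ret = get_state_private(tree, start, &readback);
        if (ret)
                return ret;
        return readback == cookie ? 0 : -EIO;
}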
1530
1531 /*
1532  * searches a range in the state tree for a given mask.
1533  * If 'filled' == 1, this returns 1 only if the whole range is covered by
1534  * extents that all have the bits set.  Otherwise, 1 is returned if any part
1535  * of the range has the bits set.
1536  */
1537 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1538                    int bits, int filled)
1539 {
1540         struct extent_state *state = NULL;
1541         struct rb_node *node;
1542         int bitset = 0;
1543
1544         spin_lock(&tree->lock);
1545         node = tree_search(tree, start);
1546         while (node && start <= end) {
1547                 state = rb_entry(node, struct extent_state, rb_node);
1548
1549                 if (filled && state->start > start) {
1550                         bitset = 0;
1551                         break;
1552                 }
1553
1554                 if (state->start > end)
1555                         break;
1556
1557                 if (state->state & bits) {
1558                         bitset = 1;
1559                         if (!filled)
1560                                 break;
1561                 } else if (filled) {
1562                         bitset = 0;
1563                         break;
1564                 }
1565                 start = state->end + 1;
1566                 if (start > end)
1567                         break;
1568                 node = rb_next(node);
1569                 if (!node) {
1570                         if (filled)
1571                                 bitset = 0;
1572                         break;
1573                 }
1574         }
1575         spin_unlock(&tree->lock);
1576         return bitset;
1577 }
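/*
 * Editor's note on the two query modes above (illustrative only; 'start' and
 * 'end' name some byte range of interest):
 *
 *   fully_dirty = test_range_bit(tree, start, end, EXTENT_DIRTY, 1);
 *   any_locked  = test_range_bit(tree, start, end, EXTENT_LOCKED, 0);
 *
 * The three page helpers that follow use exactly this pattern on a single
 * page worth of bytes.
 */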
1578
1579 /*
1580  * helper function to set a given page up to date if all the
1581  * extents in the tree for that page are up to date
1582  */
1583 static int check_page_uptodate(struct extent_io_tree *tree,
1584                                struct page *page)
1585 {
1586         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1587         u64 end = start + PAGE_CACHE_SIZE - 1;
1588         if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1589                 SetPageUptodate(page);
1590         return 0;
1591 }
1592
1593 /*
1594  * helper function to unlock a page if all the extents in the tree
1595  * for that page are unlocked
1596  */
1597 static int check_page_locked(struct extent_io_tree *tree,
1598                              struct page *page)
1599 {
1600         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1601         u64 end = start + PAGE_CACHE_SIZE - 1;
1602         if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1603                 unlock_page(page);
1604         return 0;
1605 }
1606
1607 /*
1608  * helper function to end page writeback if all the extents
1609  * in the tree for that page are done with writeback
1610  */
1611 static int check_page_writeback(struct extent_io_tree *tree,
1612                              struct page *page)
1613 {
1614         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1615         u64 end = start + PAGE_CACHE_SIZE - 1;
1616         if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1617                 end_page_writeback(page);
1618         return 0;
1619 }
1620
1621 /* lots and lots of room for performance fixes in the end_bio funcs */
1622
1623 /*
1624  * after a writepage IO is done, we need to:
1625  * clear the uptodate bits on error
1626  * clear the writeback bits in the extent tree for this IO
1627  * end_page_writeback if the page has no more pending IO
1628  *
1629  * Scheduling is not allowed, so the extent state tree is expected
1630  * to have one and only one object corresponding to this IO.
1631  */
1632 static void end_bio_extent_writepage(struct bio *bio, int err)
1633 {
1634         int uptodate = err == 0;
1635         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1636         struct extent_io_tree *tree;
1637         u64 start;
1638         u64 end;
1639         int whole_page;
1640         int ret;
1641
1642         do {
1643                 struct page *page = bvec->bv_page;
1644                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1645
1646                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1647                          bvec->bv_offset;
1648                 end = start + bvec->bv_len - 1;
1649
1650                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1651                         whole_page = 1;
1652                 else
1653                         whole_page = 0;
1654
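                /*
                 * the bio_vec array is walked from its last entry back to the
                 * first; prefetch the next page's flags before finishing this
                 * one (the read and prepare_write completions below use the
                 * same idiom)
                 */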
1655                 if (--bvec >= bio->bi_io_vec)
1656                         prefetchw(&bvec->bv_page->flags);
1657                 if (tree->ops && tree->ops->writepage_end_io_hook) {
1658                         ret = tree->ops->writepage_end_io_hook(page, start,
1659                                                        end, NULL, uptodate);
1660                         if (ret)
1661                                 uptodate = 0;
1662                 }
1663
1664                 if (!uptodate && tree->ops &&
1665                     tree->ops->writepage_io_failed_hook) {
1666                         ret = tree->ops->writepage_io_failed_hook(bio, page,
1667                                                          start, end, NULL);
1668                         if (ret == 0) {
1669                                 uptodate = (err == 0);
1670                                 continue;
1671                         }
1672                 }
1673
1674                 if (!uptodate) {
1675                         clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1676                         ClearPageUptodate(page);
1677                         SetPageError(page);
1678                 }
1679
1680                 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1681
1682                 if (whole_page)
1683                         end_page_writeback(page);
1684                 else
1685                         check_page_writeback(tree, page);
1686         } while (bvec >= bio->bi_io_vec);
1687
1688         bio_put(bio);
1689 }
1690
1691 /*
1692  * after a readpage IO is done, we need to:
1693  * clear the uptodate bits on error
1694  * set the uptodate bits if things worked
1695  * set the page up to date if all extents in the tree are uptodate
1696  * clear the lock bit in the extent tree
1697  * unlock the page if there are no other extents locked for it
1698  *
1699  * Scheduling is not allowed, so the extent state tree is expected
1700  * to have one and only one object corresponding to this IO.
1701  */
1702 static void end_bio_extent_readpage(struct bio *bio, int err)
1703 {
1704         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1705         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1706         struct extent_io_tree *tree;
1707         u64 start;
1708         u64 end;
1709         int whole_page;
1710         int ret;
1711
1712         if (err)
1713                 uptodate = 0;
1714
1715         do {
1716                 struct page *page = bvec->bv_page;
1717                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1718
1719                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1720                         bvec->bv_offset;
1721                 end = start + bvec->bv_len - 1;
1722
1723                 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1724                         whole_page = 1;
1725                 else
1726                         whole_page = 0;
1727
1728                 if (--bvec >= bio->bi_io_vec)
1729                         prefetchw(&bvec->bv_page->flags);
1730
1731                 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1732                         ret = tree->ops->readpage_end_io_hook(page, start, end,
1733                                                               NULL);
1734                         if (ret)
1735                                 uptodate = 0;
1736                 }
1737                 if (!uptodate && tree->ops &&
1738                     tree->ops->readpage_io_failed_hook) {
1739                         ret = tree->ops->readpage_io_failed_hook(bio, page,
1740                                                          start, end, NULL);
1741                         if (ret == 0) {
1742                                 uptodate =
1743                                         test_bit(BIO_UPTODATE, &bio->bi_flags);
1744                                 if (err)
1745                                         uptodate = 0;
1746                                 continue;
1747                         }
1748                 }
1749
1750                 if (uptodate) {
1751                         set_extent_uptodate(tree, start, end,
1752                                             GFP_ATOMIC);
1753                 }
1754                 unlock_extent(tree, start, end, GFP_ATOMIC);
1755
1756                 if (whole_page) {
1757                         if (uptodate) {
1758                                 SetPageUptodate(page);
1759                         } else {
1760                                 ClearPageUptodate(page);
1761                                 SetPageError(page);
1762                         }
1763                         unlock_page(page);
1764                 } else {
1765                         if (uptodate) {
1766                                 check_page_uptodate(tree, page);
1767                         } else {
1768                                 ClearPageUptodate(page);
1769                                 SetPageError(page);
1770                         }
1771                         check_page_locked(tree, page);
1772                 }
1773         } while (bvec >= bio->bi_io_vec);
1774
1775         bio_put(bio);
1776 }
1777
1778 /*
1779  * IO done from prepare_write is pretty simple: we just unlock
1780  * the structs in the extent tree when done, and set the uptodate bits
1781  * as appropriate.
1782  */
1783 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1784 {
1785         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1786         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1787         struct extent_io_tree *tree;
1788         u64 start;
1789         u64 end;
1790
1791         do {
1792                 struct page *page = bvec->bv_page;
1793                 tree = &BTRFS_I(page->mapping->host)->io_tree;
1794
1795                 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1796                         bvec->bv_offset;
1797                 end = start + bvec->bv_len - 1;
1798
1799                 if (--bvec >= bio->bi_io_vec)
1800                         prefetchw(&bvec->bv_page->flags);
1801
1802                 if (uptodate) {
1803                         set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1804                 } else {
1805                         ClearPageUptodate(page);
1806                         SetPageError(page);
1807                 }
1808
1809                 unlock_extent(tree, start, end, GFP_ATOMIC);
1810
1811         } while (bvec >= bio->bi_io_vec);
1812
1813         bio_put(bio);
1814 }
1815
1816 static struct bio *
1817 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1818                  gfp_t gfp_flags)
1819 {
1820         struct bio *bio;
1821
1822         bio = bio_alloc(gfp_flags, nr_vecs);
1823
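        /*
         * if the allocation failed while we are in memory reclaim
         * (PF_MEMALLOC), keep halving the vec count so a smaller bio can
         * still be built and IO can make forward progress
         */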
1824         if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1825                 while (!bio && (nr_vecs /= 2))
1826                         bio = bio_alloc(gfp_flags, nr_vecs);
1827         }
1828
1829         if (bio) {
1830                 bio->bi_size = 0;
1831                 bio->bi_bdev = bdev;
1832                 bio->bi_sector = first_sector;
1833         }
1834         return bio;
1835 }
1836
1837 static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
1838                           unsigned long bio_flags)
1839 {
1840         int ret = 0;
1841         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1842         struct page *page = bvec->bv_page;
1843         struct extent_io_tree *tree = bio->bi_private;
1844         u64 start;
1845         u64 end;
1846
1847         start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1848         end = start + bvec->bv_len - 1;
1849
1850         bio->bi_private = NULL;
1851
1852         bio_get(bio);
1853
1854         if (tree->ops && tree->ops->submit_bio_hook)
1855                 tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
1856                                            mirror_num, bio_flags);
1857         else
1858                 submit_bio(rw, bio);
1859         if (bio_flagged(bio, BIO_EOPNOTSUPP))
1860                 ret = -EOPNOTSUPP;
1861         bio_put(bio);
1862         return ret;
1863 }
1864
1865 static int submit_extent_page(int rw, struct extent_io_tree *tree,
1866                               struct page *page, sector_t sector,
1867                               size_t size, unsigned long offset,
1868                               struct block_device *bdev,
1869                               struct bio **bio_ret,
1870                               unsigned long max_pages,
1871                               bio_end_io_t end_io_func,
1872                               int mirror_num,
1873                               unsigned long prev_bio_flags,
1874                               unsigned long bio_flags)
1875 {
1876         int ret = 0;
1877         struct bio *bio;
1878         int nr;
1879         int contig = 0;
1880         int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
1881         int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
1882         size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
1883
1884         if (bio_ret && *bio_ret) {
1885                 bio = *bio_ret;
1886                 if (old_compressed)
1887                         contig = bio->bi_sector == sector;
1888                 else
1889                         contig = bio->bi_sector + (bio->bi_size >> 9) ==
1890                                 sector;
1891
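                /*
                 * submit the pending bio and start a new one if this page
                 * cannot be merged into it: the bio flags differ, the sectors
                 * are not contiguous, the fs merge hook refuses, or
                 * bio_add_page() finds no room left
                 */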
1892                 if (prev_bio_flags != bio_flags || !contig ||
1893                     (tree->ops && tree->ops->merge_bio_hook &&
1894                      tree->ops->merge_bio_hook(page, offset, page_size, bio,
1895                                                bio_flags)) ||
1896                     bio_add_page(bio, page, page_size, offset) < page_size) {
1897                         ret = submit_one_bio(rw, bio, mirror_num,
1898                                              prev_bio_flags);
1899                         bio = NULL;
1900                 } else {
1901                         return 0;
1902                 }
1903         }
1904         if (this_compressed)
1905                 nr = BIO_MAX_PAGES;
1906         else
1907                 nr = bio_get_nr_vecs(bdev);
1908
1909         bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1910
1911         bio_add_page(bio, page, page_size, offset);
1912         bio->bi_end_io = end_io_func;
1913         bio->bi_private = tree;
1914
1915         if (bio_ret)
1916                 *bio_ret = bio;
1917         else
1918                 ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
1919
1920         return ret;
1921 }
1922
1923 void set_page_extent_mapped(struct page *page)
1924 {
1925         if (!PagePrivate(page)) {
1926                 SetPagePrivate(page);
1927                 page_cache_get(page);
1928                 set_page_private(page, EXTENT_PAGE_PRIVATE);
1929         }
1930 }
1931
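/*
 * the length is shifted up so the low bits of page->private remain free for
 * the EXTENT_PAGE_PRIVATE* markers
 */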
1932 static void set_page_extent_head(struct page *page, unsigned long len)
1933 {
1934         set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1935 }
1936
1937 /*
1938  * basic readpage implementation.  Locked extent state structs are inserted
1939  * into the tree and are removed when the IO is done (by the end_io
1940  * handlers)
1941  */
1942 static int __extent_read_full_page(struct extent_io_tree *tree,
1943                                    struct page *page,
1944                                    get_extent_t *get_extent,
1945                                    struct bio **bio, int mirror_num,
1946                                    unsigned long *bio_flags)
1947 {
1948         struct inode *inode = page->mapping->host;
1949         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1950         u64 page_end = start + PAGE_CACHE_SIZE - 1;
1951         u64 end;
1952         u64 cur = start;
1953         u64 extent_offset;
1954         u64 last_byte = i_size_read(inode);
1955         u64 block_start;
1956         u64 cur_end;
1957         sector_t sector;
1958         struct extent_map *em;
1959         struct block_device *bdev;
1960         int ret;
1961         int nr = 0;
1962         size_t page_offset = 0;
1963         size_t iosize;
1964         size_t disk_io_size;
1965         size_t blocksize = inode->i_sb->s_blocksize;
1966         unsigned long this_bio_flag = 0;
1967
1968         set_page_extent_mapped(page);
1969
1970         end = page_end;
1971         lock_extent(tree, start, end, GFP_NOFS);
1972
1973         if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
1974                 char *userpage;
1975                 size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
1976
1977                 if (zero_offset) {
1978                         iosize = PAGE_CACHE_SIZE - zero_offset;
1979                         userpage = kmap_atomic(page, KM_USER0);
1980                         memset(userpage + zero_offset, 0, iosize);
1981                         flush_dcache_page(page);
1982                         kunmap_atomic(userpage, KM_USER0);
1983                 }
1984         }
1985         while (cur <= end) {
1986                 if (cur >= last_byte) {
1987                         char *userpage;
1988                         iosize = PAGE_CACHE_SIZE - page_offset;
1989                         userpage = kmap_atomic(page, KM_USER0);
1990                         memset(userpage + page_offset, 0, iosize);
1991                         flush_dcache_page(page);
1992                         kunmap_atomic(userpage, KM_USER0);
1993                         set_extent_uptodate(tree, cur, cur + iosize - 1,
1994                                             GFP_NOFS);
1995                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1996                         break;
1997                 }
1998                 em = get_extent(inode, page, page_offset, cur,
1999                                 end - cur + 1, 0);
2000                 if (IS_ERR(em) || !em) {
2001                         SetPageError(page);
2002                         unlock_extent(tree, cur, end, GFP_NOFS);
2003                         break;
2004                 }
2005                 extent_offset = cur - em->start;
2006                 BUG_ON(extent_map_end(em) <= cur);
2007                 BUG_ON(end < cur);
2008
2009                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2010                         this_bio_flag = EXTENT_BIO_COMPRESSED;
2011
2012                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2013                 cur_end = min(extent_map_end(em) - 1, end);
2014                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2015                 if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
2016                         disk_io_size = em->block_len;
2017                         sector = em->block_start >> 9;
2018                 } else {
2019                         sector = (em->block_start + extent_offset) >> 9;
2020                         disk_io_size = iosize;
2021                 }
2022                 bdev = em->bdev;
2023                 block_start = em->block_start;
2024                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2025                         block_start = EXTENT_MAP_HOLE;
2026                 free_extent_map(em);
2027                 em = NULL;
2028
2029                 /* we've found a hole, just zero and go on */
2030                 if (block_start == EXTENT_MAP_HOLE) {
2031                         char *userpage;
2032                         userpage = kmap_atomic(page, KM_USER0);
2033                         memset(userpage + page_offset, 0, iosize);
2034                         flush_dcache_page(page);
2035                         kunmap_atomic(userpage, KM_USER0);
2036
2037                         set_extent_uptodate(tree, cur, cur + iosize - 1,
2038                                             GFP_NOFS);
2039                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2040                         cur = cur + iosize;
2041                         page_offset += iosize;
2042                         continue;
2043                 }
2044                 /* the get_extent function already copied into the page */
2045                 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
2046                         check_page_uptodate(tree, page);
2047                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2048                         cur = cur + iosize;
2049                         page_offset += iosize;
2050                         continue;
2051                 }
2052                 /* we have an inline extent but it didn't get marked up
2053                  * to date.  Error out
2054                  */
2055                 if (block_start == EXTENT_MAP_INLINE) {
2056                         SetPageError(page);
2057                         unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
2058                         cur = cur + iosize;
2059                         page_offset += iosize;
2060                         continue;
2061                 }
2062
2063                 ret = 0;
2064                 if (tree->ops && tree->ops->readpage_io_hook) {
2065                         ret = tree->ops->readpage_io_hook(page, cur,
2066                                                           cur + iosize - 1);
2067                 }
2068                 if (!ret) {
2069                         unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2070                         pnr -= page->index;
2071                         ret = submit_extent_page(READ, tree, page,
2072                                          sector, disk_io_size, page_offset,
2073                                          bdev, bio, pnr,
2074                                          end_bio_extent_readpage, mirror_num,
2075                                          *bio_flags,
2076                                          this_bio_flag);
2077                         nr++;
2078                         *bio_flags = this_bio_flag;
2079                 }
2080                 if (ret)
2081                         SetPageError(page);
2082                 cur = cur + iosize;
2083                 page_offset += iosize;
2084         }
2085         if (!nr) {
2086                 if (!PageError(page))
2087                         SetPageUptodate(page);
2088                 unlock_page(page);
2089         }
2090         return 0;
2091 }
2092
2093 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
2094                             get_extent_t *get_extent)
2095 {
2096         struct bio *bio = NULL;
2097         unsigned long bio_flags = 0;
2098         int ret;
2099
2100         ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
2101                                       &bio_flags);
2102         if (bio)
2103                 submit_one_bio(READ, bio, 0, bio_flags);
2104         return ret;
2105 }
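/*
 * Editor's sketch, not part of the original file: how a filesystem's
 * ->readpage typically hands off to the helper above.  btrfs does the
 * equivalent from btrfs_readpage() in inode.c; the function name here is
 * illustrative only.
 */
static inline int example_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btrfs_get_extent);
}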
2106
2107 static noinline void update_nr_written(struct page *page,
2108                                       struct writeback_control *wbc,
2109                                       unsigned long nr_written)
2110 {
2111         wbc->nr_to_write -= nr_written;
2112         if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
2113             wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
2114                 page->mapping->writeback_index = page->index + nr_written;
2115 }
2116
2117 /*
2118  * the writepage semantics are similar to regular writepage.  extent
2119  * records are inserted to lock ranges in the tree, and as dirty areas
2120  * are found, they are marked writeback.  Then the lock bits are removed
2121  * and the end_io handler clears the writeback ranges
2122  */
2123 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2124                               void *data)
2125 {
2126         struct inode *inode = page->mapping->host;
2127         struct extent_page_data *epd = data;
2128         struct extent_io_tree *tree = epd->tree;
2129         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2130         u64 delalloc_start;
2131         u64 page_end = start + PAGE_CACHE_SIZE - 1;
2132         u64 end;
2133         u64 cur = start;
2134         u64 extent_offset;
2135         u64 last_byte = i_size_read(inode);
2136         u64 block_start;
2137         u64 iosize;
2138         u64 unlock_start;
2139         sector_t sector;
2140         struct extent_map *em;
2141         struct block_device *bdev;
2142         int ret;
2143         int nr = 0;
2144         size_t pg_offset = 0;
2145         size_t blocksize;
2146         loff_t i_size = i_size_read(inode);
2147         unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
2148         u64 nr_delalloc;
2149         u64 delalloc_end;
2150         int page_started;
2151         int compressed;
2152         int write_flags;
2153         unsigned long nr_written = 0;
2154
2155         if (wbc->sync_mode == WB_SYNC_ALL)
2156                 write_flags = WRITE_SYNC_PLUG;
2157         else
2158                 write_flags = WRITE;
2159
2160         WARN_ON(!PageLocked(page));
2161         pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
2162         if (page->index > end_index ||
2163            (page->index == end_index && !pg_offset)) {
2164                 page->mapping->a_ops->invalidatepage(page, 0);
2165                 unlock_page(page);
2166                 return 0;
2167         }
2168
2169         if (page->index == end_index) {
2170                 char *userpage;
2171
2172                 userpage = kmap_atomic(page, KM_USER0);
2173                 memset(userpage + pg_offset, 0,
2174                        PAGE_CACHE_SIZE - pg_offset);
2175                 kunmap_atomic(userpage, KM_USER0);
2176                 flush_dcache_page(page);
2177         }
2178         pg_offset = 0;
2179
2180         set_page_extent_mapped(page);
2181
2182         delalloc_start = start;
2183         delalloc_end = 0;
2184         page_started = 0;
2185         if (!epd->extent_locked) {
2186                 /*
2187                  * make sure the wbc mapping index is at least updated
2188                  * to this page.
2189                  */
2190                 update_nr_written(page, wbc, 0);
2191
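                /*
                 * walk every delalloc range that touches this page and hand
                 * it to the fs fill_delalloc hook, which allocates extents
                 * and may start the IO itself (page_started is set when it
                 * does)
                 */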
2192                 while (delalloc_end < page_end) {
2193                         nr_delalloc = find_lock_delalloc_range(inode, tree,
2194                                                        page,
2195                                                        &delalloc_start,
2196                                                        &delalloc_end,
2197                                                        128 * 1024 * 1024);
2198                         if (nr_delalloc == 0) {
2199                                 delalloc_start = delalloc_end + 1;
2200                                 continue;
2201                         }
2202                         tree->ops->fill_delalloc(inode, page, delalloc_start,
2203                                                  delalloc_end, &page_started,
2204                                                  &nr_written);
2205                         delalloc_start = delalloc_end + 1;
2206                 }
2207
2208                 /* did the fill delalloc function already unlock and start
2209                  * the IO?
2210                  */
2211                 if (page_started) {
2212                         ret = 0;
2213                         /*
2214                          * we've unlocked the page, so we can't update
2215                          * the mapping's writeback index, just update
2216                          * nr_to_write.
2217                          */
2218                         wbc->nr_to_write -= nr_written;
2219                         goto done_unlocked;
2220                 }
2221         }
2222         lock_extent(tree, start, page_end, GFP_NOFS);
2223
2224         unlock_start = start;
2225
2226         if (tree->ops && tree->ops->writepage_start_hook) {
2227                 ret = tree->ops->writepage_start_hook(page, start,
2228                                                       page_end);
2229                 if (ret == -EAGAIN) {
2230                         unlock_extent(tree, start, page_end, GFP_NOFS);
2231                         redirty_page_for_writepage(wbc, page);
2232                         update_nr_written(page, wbc, nr_written);
2233                         unlock_page(page);
2234                         ret = 0;
2235                         goto done_unlocked;
2236                 }
2237         }
2238
2239         /*
2240          * we don't want to touch the inode after unlocking the page,
2241          * so we update the mapping writeback index now
2242          */
2243         update_nr_written(page, wbc, nr_written + 1);
2244
2245         end = page_end;
2246         if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
2247                 printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
2248
2249         if (last_byte <= start) {
2250                 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
2251                 unlock_extent(tree, start, page_end, GFP_NOFS);
2252                 if (tree->ops && tree->ops->writepage_end_io_hook)
2253                         tree->ops->writepage_end_io_hook(page, start,
2254                                                          page_end, NULL, 1);
2255                 unlock_start = page_end + 1;
2256                 goto done;
2257         }
2258
2259         set_extent_uptodate(tree, start, page_end, GFP_NOFS);
2260         blocksize = inode->i_sb->s_blocksize;
2261
2262         while (cur <= end) {
2263                 if (cur >= last_byte) {
2264                         clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
2265                         unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2266                         if (tree->ops && tree->ops->writepage_end_io_hook)
2267                                 tree->ops->writepage_end_io_hook(page, cur,
2268                                                          page_end, NULL, 1);
2269                         unlock_start = page_end + 1;
2270                         break;
2271                 }
2272                 em = epd->get_extent(inode, page, pg_offset, cur,
2273                                      end - cur + 1, 1);
2274                 if (IS_ERR(em) || !em) {
2275                         SetPageError(page);
2276                         break;
2277                 }
2278
2279                 extent_offset = cur - em->start;
2280                 BUG_ON(extent_map_end(em) <= cur);
2281                 BUG_ON(end < cur);
2282                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
2283                 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
2284                 sector = (em->block_start + extent_offset) >> 9;
2285                 bdev = em->bdev;
2286                 block_start = em->block_start;
2287                 compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
2288                 free_extent_map(em);
2289                 em = NULL;
2290
2291                 /*
2292                  * compressed and inline extents are written through other
2293                  * paths in the FS
2294                  */
2295                 if (compressed || block_start == EXTENT_MAP_HOLE ||
2296                     block_start == EXTENT_MAP_INLINE) {
2297                         clear_extent_dirty(tree, cur,
2298                                            cur + iosize - 1, GFP_NOFS);
2299
2300                         unlock_extent(tree, unlock_start, cur + iosize - 1,
2301                                       GFP_NOFS);
2302
2303                         /*
2304                          * end_io notification does not happen here for
2305                          * compressed extents
2306                          */
2307                         if (!compressed && tree->ops &&
2308                             tree->ops->writepage_end_io_hook)
2309                                 tree->ops->writepage_end_io_hook(page, cur,
2310                                                          cur + iosize - 1,
2311                                                          NULL, 1);
2312                         else if (compressed) {
2313                                 /* we don't want to end_page_writeback on
2314                                  * a compressed extent.  this happens
2315                                  * elsewhere
2316                                  */
2317                                 nr++;
2318                         }
2319
2320                         cur += iosize;
2321                         pg_offset += iosize;
2322                         unlock_start = cur;
2323                         continue;
2324                 }
2325                 /* leave this out until we have a page_mkwrite call */
2326                 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
2327                                    EXTENT_DIRTY, 0)) {
2328                         cur = cur + iosize;
2329                         pg_offset += iosize;
2330                         continue;
2331                 }
2332
2333                 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
2334                 if (tree->ops && tree->ops->writepage_io_hook) {
2335                         ret = tree->ops->writepage_io_hook(page, cur,
2336                                                 cur + iosize - 1);
2337                 } else {
2338                         ret = 0;
2339                 }
2340                 if (ret) {
2341                         SetPageError(page);
2342                 } else {
2343                         unsigned long max_nr = end_index + 1;
2344
2345                         set_range_writeback(tree, cur, cur + iosize - 1);
2346                         if (!PageWriteback(page)) {
2347                                 printk(KERN_ERR "btrfs warning page %lu not "
2348                                        "writeback, cur %llu end %llu\n",
2349                                        page->index, (unsigned long long)cur,
2350                                        (unsigned long long)end);
2351                         }
2352
2353                         ret = submit_extent_page(write_flags, tree, page,
2354                                                  sector, iosize, pg_offset,
2355                                                  bdev, &epd->bio, max_nr,
2356                                                  end_bio_extent_writepage,
2357                                                  0, 0, 0);
2358                         if (ret)
2359                                 SetPageError(page);
2360                 }
2361                 cur = cur + iosize;
2362                 pg_offset += iosize;
2363                 nr++;
2364         }
2365 done:
2366         if (nr == 0) {
2367                 /* make sure the mapping tag for page dirty gets cleared */
2368                 set_page_writeback(page);
2369                 end_page_writeback(page);
2370         }
2371         if (unlock_start <= page_end)
2372                 unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
2373         unlock_page(page);
2374
2375 done_unlocked:
2376
2377         return 0;
2378 }
2379
2380 /**
2381  * extent_write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2382  * @mapping: address space structure to write
2383  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2384  * @writepage: function called for each page
2385  * @data: data passed to writepage function
2386  *
2387  * If a page is already under I/O, write_cache_pages() skips it, even
2388  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2389  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2390  * and msync() need to guarantee that all the data which was dirty at the time
2391  * the call was made gets new I/O started against it.  If wbc->sync_mode is
2392  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2393  * existing IO to complete.
2394  */
2395 static int extent_write_cache_pages(struct extent_io_tree *tree,
2396                              struct address_space *mapping,
2397                              struct writeback_control *wbc,
2398                              writepage_t writepage, void *data,
2399                              void (*flush_fn)(void *))
2400 {
2401         struct backing_dev_info *bdi = mapping->backing_dev_info;
2402         int ret = 0;
2403         int done = 0;
2404         struct pagevec pvec;
2405         int nr_pages;
2406         pgoff_t index;
2407         pgoff_t end;            /* Inclusive */
2408         int scanned = 0;
2409         int range_whole = 0;
2410
2411         pagevec_init(&pvec, 0);
2412         if (wbc->range_cyclic) {
2413                 index = mapping->writeback_index; /* Start from prev offset */
2414                 end = -1;
2415         } else {
2416                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2417                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2418                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2419                         range_whole = 1;
2420                 scanned = 1;
2421         }
2422 retry:
2423         while (!done && (index <= end) &&
2424                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2425                               PAGECACHE_TAG_DIRTY, min(end - index,
2426                                   (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2427                 unsigned i;
2428
2429                 scanned = 1;
2430                 for (i = 0; i < nr_pages; i++) {
2431                         struct page *page = pvec.pages[i];
2432
2433                         /*
2434                          * At this point we hold neither mapping->tree_lock nor
2435                          * lock on the page itself: the page may be truncated or
2436                          * invalidated (changing page->mapping to NULL), or even
2437                          * swizzled back from swapper_space to tmpfs file
2438                          * mapping
2439                          */
2440                         if (tree->ops && tree->ops->write_cache_pages_lock_hook)
2441                                 tree->ops->write_cache_pages_lock_hook(page);
2442                         else
2443                                 lock_page(page);
2444
2445                         if (unlikely(page->mapping != mapping)) {
2446                                 unlock_page(page);
2447                                 continue;
2448                         }
2449
2450                         if (!wbc->range_cyclic && page->index > end) {
2451                                 done = 1;
2452                                 unlock_page(page);
2453                                 continue;
2454                         }
2455
2456                         if (wbc->sync_mode != WB_SYNC_NONE) {
2457                                 if (PageWriteback(page))
2458                                         flush_fn(data);
2459                                 wait_on_page_writeback(page);
2460                         }
2461
2462                         if (PageWriteback(page) ||
2463                             !clear_page_dirty_for_io(page)) {
2464                                 unlock_page(page);
2465                                 continue;
2466                         }
2467
2468                         ret = (*writepage)(page, wbc, data);
2469
2470                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2471                                 unlock_page(page);
2472                                 ret = 0;
2473                         }
2474                         if (ret || wbc->nr_to_write <= 0)
2475                                 done = 1;
2476                         if (wbc->nonblocking && bdi_write_congested(bdi)) {
2477                                 wbc->encountered_congestion = 1;
2478                                 done = 1;
2479                         }
2480                 }
2481                 pagevec_release(&pvec);
2482                 cond_resched();
2483         }
2484         if (!scanned && !done) {
2485                 /*
2486                  * We hit the last page and there is more work to be done: wrap
2487                  * back to the start of the file
2488                  */
2489                 scanned = 1;
2490                 index = 0;
2491                 goto retry;
2492         }
2493         return ret;
2494 }
2495
2496 static void flush_epd_write_bio(struct extent_page_data *epd)
2497 {
2498         if (epd->bio) {
2499                 if (epd->sync_io)
2500                         submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
2501                 else
2502                         submit_one_bio(WRITE, epd->bio, 0, 0);
2503                 epd->bio = NULL;
2504         }
2505 }
2506
2507 static noinline void flush_write_bio(void *data)
2508 {
2509         struct extent_page_data *epd = data;
2510         flush_epd_write_bio(epd);
2511 }
2512
2513 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
2514                           get_extent_t *get_extent,
2515                           struct writeback_control *wbc)
2516 {
2517         int ret;
2518         struct address_space *mapping = page->mapping;
2519         struct extent_page_data epd = {
2520                 .bio = NULL,
2521                 .tree = tree,
2522                 .get_extent = get_extent,
2523                 .extent_locked = 0,
2524                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2525         };
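        /*
         * after the requested page is written below, the same epd (and thus
         * the same in-flight bio) is reused to push up to nr_to_write of the
         * dirty pages that follow it, batching the IO
         */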
2526         struct writeback_control wbc_writepages = {
2527                 .bdi            = wbc->bdi,
2528                 .sync_mode      = wbc->sync_mode,
2529                 .older_than_this = NULL,
2530                 .nr_to_write    = 64,
2531                 .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
2532                 .range_end      = (loff_t)-1,
2533         };
2534
2535         ret = __extent_writepage(page, wbc, &epd);
2536
2537         extent_write_cache_pages(tree, mapping, &wbc_writepages,
2538                                  __extent_writepage, &epd, flush_write_bio);
2539         flush_epd_write_bio(&epd);
2540         return ret;
2541 }
2542
2543 int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
2544                               u64 start, u64 end, get_extent_t *get_extent,
2545                               int mode)
2546 {
2547         int ret = 0;
2548         struct address_space *mapping = inode->i_mapping;
2549         struct page *page;
2550         unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
2551                 PAGE_CACHE_SHIFT;
2552
2553         struct extent_page_data epd = {
2554                 .bio = NULL,
2555                 .tree = tree,
2556                 .get_extent = get_extent,
2557                 .extent_locked = 1,
2558                 .sync_io = mode == WB_SYNC_ALL,
2559         };
2560         struct writeback_control wbc_writepages = {
2561                 .bdi            = inode->i_mapping->backing_dev_info,
2562                 .sync_mode      = mode,
2563                 .older_than_this = NULL,
2564                 .nr_to_write    = nr_pages * 2,
2565                 .range_start    = start,
2566                 .range_end      = end + 1,
2567         };
2568
2569         while (start <= end) {
2570                 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
2571                 if (clear_page_dirty_for_io(page))
2572                         ret = __extent_writepage(page, &wbc_writepages, &epd);
2573                 else {
2574                         if (tree->ops && tree->ops->writepage_end_io_hook)
2575                                 tree->ops->writepage_end_io_hook(page, start,
2576                                                  start + PAGE_CACHE_SIZE - 1,
2577                                                  NULL, 1);
2578                         unlock_page(page);
2579                 }
2580                 page_cache_release(page);
2581                 start += PAGE_CACHE_SIZE;
2582         }
2583
2584         flush_epd_write_bio(&epd);
2585         return ret;
2586 }
2587
2588 int extent_writepages(struct extent_io_tree *tree,
2589                       struct address_space *mapping,
2590                       get_extent_t *get_extent,
2591                       struct writeback_control *wbc)
2592 {
2593         int ret = 0;
2594         struct extent_page_data epd = {
2595                 .bio = NULL,
2596                 .tree = tree,
2597                 .get_extent = get_extent,
2598                 .extent_locked = 0,
2599                 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
2600         };
2601
2602         ret = extent_write_cache_pages(tree, mapping, wbc,
2603                                        __extent_writepage, &epd,
2604                                        flush_write_bio);
2605         flush_epd_write_bio(&epd);
2606         return ret;
2607 }
2608
2609 int extent_readpages(struct extent_io_tree *tree,
2610                      struct address_space *mapping,
2611                      struct list_head *pages, unsigned nr_pages,
2612                      get_extent_t get_extent)
2613 {
2614         struct bio *bio = NULL;
2615         unsigned page_idx;
2616         struct pagevec pvec;
2617         unsigned long bio_flags = 0;
2618
2619         pagevec_init(&pvec, 0);
2620         for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2621                 struct page *page = list_entry(pages->prev, struct page, lru);
2622
2623                 prefetchw(&page->flags);
2624                 list_del(&page->lru);
2625                 /*
2626                  * what we want to do here is call add_to_page_cache_lru,
2627                  * but that isn't exported, so we reproduce it here
2628                  */
2629                 if (!add_to_page_cache(page, mapping,
2630                                         page->index, GFP_KERNEL)) {
2631
2632                         /* open coding of lru_cache_add, also not exported */
2633                         page_cache_get(page);
2634                         if (!pagevec_add(&pvec, page))
2635                                 __pagevec_lru_add_file(&pvec);
2636                         __extent_read_full_page(tree, page, get_extent,
2637                                                 &bio, 0, &bio_flags);
2638                 }
2639                 page_cache_release(page);
2640         }
2641         if (pagevec_count(&pvec))
2642                 __pagevec_lru_add_file(&pvec);
2643         BUG_ON(!list_empty(pages));
2644         if (bio)
2645                 submit_one_bio(READ, bio, 0, bio_flags);
2646         return 0;
2647 }
2648
2649 /*
2650  * basic invalidatepage code, this waits on any locked or writeback
2651  * ranges corresponding to the page, and then deletes any extent state
2652  * records from the tree
2653  */
2654 int extent_invalidatepage(struct extent_io_tree *tree,
2655                           struct page *page, unsigned long offset)
2656 {
2657         u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2658         u64 end = start + PAGE_CACHE_SIZE - 1;
2659         size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2660
2661         start += (offset + blocksize - 1) & ~(blocksize - 1);
2662         if (start > end)
2663                 return 0;
2664
2665         lock_extent(tree, start, end, GFP_NOFS);
2666         wait_on_extent_writeback(tree, start, end);
2667         clear_extent_bit(tree, start, end,
2668                          EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2669                          1, 1, GFP_NOFS);
2670         return 0;
2671 }
2672
2673 /*
2674  * simple commit_write call: the page is marked dirty and the inode size is
2675  * updated if the write extended past the old i_size
2676  */
2677 int extent_commit_write(struct extent_io_tree *tree,
2678                         struct inode *inode, struct page *page,
2679                         unsigned from, unsigned to)
2680 {
2681         loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2682
2683         set_page_extent_mapped(page);
2684         set_page_dirty(page);
2685
2686         if (pos > inode->i_size) {
2687                 i_size_write(inode, pos);
2688                 mark_inode_dirty(inode);
2689         }
2690         return 0;
2691 }
2692
2693 int extent_prepare_write(struct extent_io_tree *tree,
2694                          struct inode *inode, struct page *page,
2695                          unsigned from, unsigned to, get_extent_t *get_extent)
2696 {
2697         u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2698         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2699         u64 block_start;
2700         u64 orig_block_start;
2701         u64 block_end;
2702         u64 cur_end;
2703         struct extent_map *em;
2704         unsigned blocksize = 1 << inode->i_blkbits;
2705         size_t page_offset = 0;
2706         size_t block_off_start;
2707         size_t block_off_end;
2708         int err = 0;
2709         int iocount = 0;
2710         int ret = 0;
2711         int isnew;
2712
2713         set_page_extent_mapped(page);
2714
2715         block_start = (page_start + from) & ~((u64)blocksize - 1);
2716         block_end = (page_start + to - 1) | (blocksize - 1);
2717         orig_block_start = block_start;
2718
2719         lock_extent(tree, page_start, page_end, GFP_NOFS);
2720         while (block_start <= block_end) {
2721                 em = get_extent(inode, page, page_offset, block_start,
2722                                 block_end - block_start + 1, 1);
2723                 if (IS_ERR(em) || !em)
2724                         goto err;
2725
2726                 cur_end = min(block_end, extent_map_end(em) - 1);
2727                 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2728                 block_off_end = block_off_start + blocksize;
2729                 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2730
2731                 if (!PageUptodate(page) && isnew &&
2732                     (block_off_end > to || block_off_start < from)) {
2733                         void *kaddr;
2734
2735                         kaddr = kmap_atomic(page, KM_USER0);
2736                         if (block_off_end > to)
2737                                 memset(kaddr + to, 0, block_off_end - to);
2738                         if (block_off_start < from)
2739                                 memset(kaddr + block_off_start, 0,
2740                                        from - block_off_start);
2741                         flush_dcache_page(page);
2742                         kunmap_atomic(kaddr, KM_USER0);
2743                 }
2744                 if ((em->block_start != EXTENT_MAP_HOLE &&
2745                      em->block_start != EXTENT_MAP_INLINE) &&
2746                     !isnew && !PageUptodate(page) &&
2747                     (block_off_end > to || block_off_start < from) &&
2748                     !test_range_bit(tree, block_start, cur_end,
2749                                     EXTENT_UPTODATE, 1)) {
2750                         u64 sector;
2751                         u64 extent_offset = block_start - em->start;
2752                         size_t iosize;
2753                         sector = (em->block_start + extent_offset) >> 9;
2754                         iosize = (cur_end - block_start + blocksize) &
2755                                 ~((u64)blocksize - 1);
2756                         /*
2757                          * we've already got the extent locked, but we
2758                          * need to split the state such that our end_bio
2759                          * handler can clear the lock.
2760                          */
2761                         set_extent_bit(tree, block_start,
2762                                        block_start + iosize - 1,
2763                                        EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2764                         ret = submit_extent_page(READ, tree, page,
2765                                          sector, iosize, page_offset, em->bdev,
2766                                          NULL, 1,
2767                                          end_bio_extent_preparewrite, 0,
2768                                          0, 0);
2769                         iocount++;
2770                         block_start = block_start + iosize;
2771                 } else {
2772                         set_extent_uptodate(tree, block_start, cur_end,
2773                                             GFP_NOFS);
2774                         unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2775                         block_start = cur_end + 1;
2776                 }
2777                 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2778                 free_extent_map(em);
2779         }
2780         if (iocount) {
2781                 wait_extent_bit(tree, orig_block_start,
2782                                 block_end, EXTENT_LOCKED);
2783         }
2784         check_page_uptodate(tree, page);
2785 err:
2786         /* FIXME, zero out newly allocated blocks on error */
2787         return err;
2788 }
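/*
 * Editor's sketch, not part of the original file: the pair above follows the
 * legacy ->prepare_write/->commit_write pattern: prepare the byte range, copy
 * in the new data, then commit.  The page is assumed to already be locked by
 * the caller, and the helper name, 'data' and 'len' are illustrative; 'len'
 * must keep the range inside this one page.
 */
static inline int example_write_into_page(struct extent_io_tree *tree,
                                          struct inode *inode,
                                          struct page *page, unsigned from,
                                          const char *data, unsigned len,
                                          get_extent_t *get_extent)
{
        unsigned to = from + len;
        char *kaddr;
        int ret;

        ret = extent_prepare_write(tree, inode, page, from, to, get_extent);
        if (ret)
                return ret;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr + from, data, len);
        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);

        return extent_commit_write(tree, inode, page, from, to);
}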
2789
2790 /*
2791  * a helper for releasepage, this tests for areas of the page that
2792  * are locked or under IO and drops the related state bits if it is safe
2793  * to drop the page.
2794  */
2795 int try_release_extent_state(struct extent_map_tree *map,
2796                              struct extent_io_tree *tree, struct page *page,
2797                              gfp_t mask)
2798 {
2799         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2800         u64 end = start + PAGE_CACHE_SIZE - 1;
2801         int ret = 1;
2802
2803         if (test_range_bit(tree, start, end,
2804                            EXTENT_IOBITS | EXTENT_ORDERED, 0))
2805                 ret = 0;
2806         else {
2807                 if ((mask & GFP_NOFS) == GFP_NOFS)
2808                         mask = GFP_NOFS;
2809                 clear_extent_bit(tree, start, end, EXTENT_UPTODATE,
2810                                  1, 1, mask);
2811         }
2812         return ret;
2813 }
2814
2815 /*
2816  * a helper for releasepage.  As long as there are no locked extents
2817  * in the range corresponding to the page, both state records and extent
2818  * map records are removed
2819  */
2820 int try_release_extent_mapping(struct extent_map_tree *map,
2821                                struct extent_io_tree *tree, struct page *page,
2822                                gfp_t mask)
2823 {
2824         struct extent_map *em;
2825         u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2826         u64 end = start + PAGE_CACHE_SIZE - 1;
2827
2828         if ((mask & __GFP_WAIT) &&
2829             page->mapping->host->i_size > 16 * 1024 * 1024) {
2830                 u64 len;
2831                 while (start <= end) {
2832                         len = end - start + 1;
2833                         spin_lock(&map->lock);
2834                         em = lookup_extent_mapping(map, start, len);
2835                         if (!em || IS_ERR(em)) {
2836                                 spin_unlock(&map->lock);
2837                                 break;
2838                         }
2839                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
2840                             em->start != start) {
2841                                 spin_unlock(&map->lock);
2842                                 free_extent_map(em);
2843                                 break;
2844                         }
2845                         if (!test_range_bit(tree, em->start,
2846                                             extent_map_end(em) - 1,
2847                                             EXTENT_LOCKED | EXTENT_WRITEBACK |
2848                                             EXTENT_ORDERED,
2849                                             0)) {
2850                                 remove_extent_mapping(map, em);
2851                                 /* once for the rb tree */
2852                                 free_extent_map(em);
2853                         }
2854                         start = extent_map_end(em);
2855                         spin_unlock(&map->lock);
2856
2857                         /* once for us */
2858                         free_extent_map(em);
2859                 }
2860         }
2861         return try_release_extent_state(map, tree, page, mask);
2862 }
2863
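/*
 * bmap helper: map a logical file block to a disk sector for the
 * ->bmap/FIBMAP path.  Holes, inline extents and delalloc ranges have no
 * plain disk location and come back as sector 0.
 */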
2864 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2865                 get_extent_t *get_extent)
2866 {
2867         struct inode *inode = mapping->host;
2868         u64 start = iblock << inode->i_blkbits;
2869         sector_t sector = 0;
2870         size_t blksize = (1 << inode->i_blkbits);
2871         struct extent_map *em;
2872
2873         lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2874                     GFP_NOFS);
2875         em = get_extent(inode, NULL, 0, start, blksize, 0);
2876         unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
2877                       GFP_NOFS);
2878         if (!em || IS_ERR(em))
2879                 return 0;
2880
2881         if (em->block_start > EXTENT_MAP_LAST_BYTE)
2882                 goto out;
2883
2884         sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2885 out:
2886         free_extent_map(em);
2887         return sector;
2888 }
2889
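/*
 * fiemap helper: walk the extent maps covering [start, start + len) and
 * report each one via fiemap_fill_next_extent(), translating the special
 * block_start values (hole, inline, delalloc, last byte) into the
 * corresponding FIEMAP_EXTENT_* flags.
 */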
2890 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2891                 __u64 start, __u64 len, get_extent_t *get_extent)
2892 {
2893         int ret;
2894         u64 off = start;
2895         u64 max = start + len;
2896         u32 flags = 0;
2897         u64 disko = 0;
2898         struct extent_map *em = NULL;
2899         int end = 0;
2900         u64 em_start = 0, em_len = 0;
2901         unsigned long emflags;
2902         ret = 0;
2903
2904         if (len == 0)
2905                 return -EINVAL;
2906
2907         lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
2908                 GFP_NOFS);
2909         em = get_extent(inode, NULL, 0, off, max - off, 0);
2910         if (!em)
2911                 goto out;
2912         if (IS_ERR(em)) {
2913                 ret = PTR_ERR(em);
2914                 goto out;
2915         }
2916         while (!end) {
2917                 off = em->start + em->len;
2918                 if (off >= max)
2919                         end = 1;
2920
2921                 em_start = em->start;
2922                 em_len = em->len;
2923
2924                 disko = 0;
2925                 flags = 0;
2926
2927                 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2928                         end = 1;
2929                         flags |= FIEMAP_EXTENT_LAST;
2930                 } else if (em->block_start == EXTENT_MAP_HOLE) {
2931                         flags |= FIEMAP_EXTENT_UNWRITTEN;
2932                 } else if (em->block_start == EXTENT_MAP_INLINE) {
2933                         flags |= (FIEMAP_EXTENT_DATA_INLINE |
2934                                   FIEMAP_EXTENT_NOT_ALIGNED);
2935                 } else if (em->block_start == EXTENT_MAP_DELALLOC) {
2936                         flags |= (FIEMAP_EXTENT_DELALLOC |
2937                                   FIEMAP_EXTENT_UNKNOWN);
2938                 } else {
2939                         disko = em->block_start;
2940                 }
2941                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2942                         flags |= FIEMAP_EXTENT_ENCODED;
2943
2944                 emflags = em->flags;
2945                 free_extent_map(em);
2946                 em = NULL;
2947
2948                 if (!end) {
2949                         em = get_extent(inode, NULL, 0, off, max - off, 0);
2950                         if (!em)
2951                                 goto out;
2952                         if (IS_ERR(em)) {
2953                                 ret = PTR_ERR(em);
2954                                 goto out;
2955                         }
2956                         emflags = em->flags;
2957                 }
2958                 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
2959                         flags |= FIEMAP_EXTENT_LAST;
2960                         end = 1;
2961                 }
2962
2963                 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2964                                         em_len, flags);
2965                 if (ret)
2966                         goto out_free;
2967         }
2968 out_free:
2969         free_extent_map(em);
2970 out:
2971         unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
2972                         GFP_NOFS);
2973         return ret;
2974 }
2975
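/*
 * return page number i of the extent buffer.  Page 0 is cached in
 * eb->first_page; the rest are looked up in the page cache radix tree of
 * the buffer's mapping.
 */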
2976 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2977                                               unsigned long i)
2978 {
2979         struct page *p;
2980         struct address_space *mapping;
2981
2982         if (i == 0)
2983                 return eb->first_page;
2984         i += eb->start >> PAGE_CACHE_SHIFT;
2985         mapping = eb->first_page->mapping;
2986         if (!mapping)
2987                 return NULL;
2988
2989         /*
2990          * extent_buffer_page is only called after pinning the page
2991          * by increasing the reference count.  So we know the page must
2992          * be in the radix tree.
2993          */
2994         rcu_read_lock();
2995         p = radix_tree_lookup(&mapping->page_tree, i);
2996         rcu_read_unlock();
2997
2998         return p;
2999 }
3000
3001 static inline unsigned long num_extent_pages(u64 start, u64 len)
3002 {
3003         return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
3004                 (start >> PAGE_CACHE_SHIFT);
3005 }
3006
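/*
 * allocate a bare extent_buffer struct for [start, start + len) and
 * initialize its lock, wait queue and refcount.  The backing pages are
 * attached later by alloc_extent_buffer().
 */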
3007 static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3008                                                    u64 start,
3009                                                    unsigned long len,
3010                                                    gfp_t mask)
3011 {
3012         struct extent_buffer *eb = NULL;
3013 #if LEAK_DEBUG
3014         unsigned long flags;
3015 #endif
3016
3017         eb = kmem_cache_zalloc(extent_buffer_cache, mask);
             if (!eb)
                     return NULL;
3018         eb->start = start;
3019         eb->len = len;
3020         spin_lock_init(&eb->lock);
3021         init_waitqueue_head(&eb->lock_wq);
3022
3023 #if LEAK_DEBUG
3024         spin_lock_irqsave(&leak_lock, flags);
3025         list_add(&eb->leak_list, &buffers);
3026         spin_unlock_irqrestore(&leak_lock, flags);
3027 #endif
3028         atomic_set(&eb->refs, 1);
3029
3030         return eb;
3031 }
3032
3033 static void __free_extent_buffer(struct extent_buffer *eb)
3034 {
3035 #if LEAK_DEBUG
3036         unsigned long flags;
3037         spin_lock_irqsave(&leak_lock, flags);
3038         list_del(&eb->leak_list);
3039         spin_unlock_irqrestore(&leak_lock, flags);
3040 #endif
3041         kmem_cache_free(extent_buffer_cache, eb);
3042 }
3043
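/*
 * find or create the extent buffer covering [start, start + len).  An
 * existing buffer just gets another reference.  Otherwise the backing
 * pages are pinned in the page cache (page0 may be supplied by the
 * caller) and the new buffer is inserted into the tree with one
 * reference for the tree and one for the caller.
 */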
3044 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3045                                           u64 start, unsigned long len,
3046                                           struct page *page0,
3047                                           gfp_t mask)
3048 {
3049         unsigned long num_pages = num_extent_pages(start, len);
3050         unsigned long i;
3051         unsigned long index = start >> PAGE_CACHE_SHIFT;
3052         struct extent_buffer *eb;
3053         struct extent_buffer *exists = NULL;
3054         struct page *p;
3055         struct address_space *mapping = tree->mapping;
3056         int uptodate = 1;
3057
3058         spin_lock(&tree->buffer_lock);
3059         eb = buffer_search(tree, start);
3060         if (eb) {
3061                 atomic_inc(&eb->refs);
3062                 spin_unlock(&tree->buffer_lock);
3063                 mark_page_accessed(eb->first_page);
3064                 return eb;
3065         }
3066         spin_unlock(&tree->buffer_lock);
3067
3068         eb = __alloc_extent_buffer(tree, start, len, mask);
3069         if (!eb)
3070                 return NULL;
3071
3072         if (page0) {
3073                 eb->first_page = page0;
3074                 i = 1;
3075                 index++;
3076                 page_cache_get(page0);
3077                 mark_page_accessed(page0);
3078                 set_page_extent_mapped(page0);
3079                 set_page_extent_head(page0, len);
3080                 uptodate = PageUptodate(page0);
3081         } else {
3082                 i = 0;
3083         }
3084         for (; i < num_pages; i++, index++) {
3085                 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
3086                 if (!p) {
3087                         WARN_ON(1);
3088                         goto free_eb;
3089                 }
3090                 set_page_extent_mapped(p);
3091                 mark_page_accessed(p);
3092                 if (i == 0) {
3093                         eb->first_page = p;
3094                         set_page_extent_head(p, len);
3095                 } else {
3096                         set_page_private(p, EXTENT_PAGE_PRIVATE);
3097                 }
3098                 if (!PageUptodate(p))
3099                         uptodate = 0;
3100                 unlock_page(p);
3101         }
3102         if (uptodate)
3103                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3104
3105         spin_lock(&tree->buffer_lock);
3106         exists = buffer_tree_insert(tree, start, &eb->rb_node);
3107         if (exists) {
3108                 /* add one reference for the caller */
3109                 atomic_inc(&exists->refs);
3110                 spin_unlock(&tree->buffer_lock);
3111                 goto free_eb;
3112         }
3113         spin_unlock(&tree->buffer_lock);
3114
3115         /* add one reference for the tree */
3116         atomic_inc(&eb->refs);
3117         return eb;
3118
3119 free_eb:
3120         if (!atomic_dec_and_test(&eb->refs))
3121                 return exists;
3122         for (index = 1; index < i; index++)
3123                 page_cache_release(extent_buffer_page(eb, index));
3124         page_cache_release(extent_buffer_page(eb, 0));
3125         __free_extent_buffer(eb);
3126         return exists;
3127 }
3128
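/*
 * look up an already allocated extent buffer starting at 'start' and take
 * a reference on it, or return NULL if there is none.
 */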
3129 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
3130                                          u64 start, unsigned long len,
3131                                           gfp_t mask)
3132 {
3133         struct extent_buffer *eb;
3134
3135         spin_lock(&tree->buffer_lock);
3136         eb = buffer_search(tree, start);
3137         if (eb)
3138                 atomic_inc(&eb->refs);
3139         spin_unlock(&tree->buffer_lock);
3140
3141         if (eb)
3142                 mark_page_accessed(eb->first_page);
3143
3144         return eb;
3145 }
3146
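/*
 * drop a reference on an extent buffer.  Buffers are only torn down by
 * try_release_extent_buffer(), so the count reaching zero here is
 * unexpected and just triggers a WARN_ON.
 */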
3147 void free_extent_buffer(struct extent_buffer *eb)
3148 {
3149         if (!eb)
3150                 return;
3151
3152         if (!atomic_dec_and_test(&eb->refs))
3153                 return;
3154
3155         WARN_ON(1);
3156 }
3157
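/*
 * clear the dirty state on every page of the buffer, including the
 * PAGECACHE_TAG_DIRTY radix tree tag, so writeback stops seeing them.
 */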
3158 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3159                               struct extent_buffer *eb)
3160 {
3161         unsigned long i;
3162         unsigned long num_pages;
3163         struct page *page;
3164
3165         num_pages = num_extent_pages(eb->start, eb->len);
3166
3167         for (i = 0; i < num_pages; i++) {
3168                 page = extent_buffer_page(eb, i);
3169                 if (!PageDirty(page))
3170                         continue;
3171
3172                 lock_page(page);
3173                 if (i == 0)
3174                         set_page_extent_head(page, eb->len);
3175                 else
3176                         set_page_private(page, EXTENT_PAGE_PRIVATE);
3177
3178                 clear_page_dirty_for_io(page);
3179                 spin_lock_irq(&page->mapping->tree_lock);
3180                 if (!PageDirty(page)) {
3181                         radix_tree_tag_clear(&page->mapping->page_tree,
3182                                                 page_index(page),
3183                                                 PAGECACHE_TAG_DIRTY);
3184                 }
3185                 spin_unlock_irq(&page->mapping->tree_lock);
3186                 unlock_page(page);
3187         }
3188         return 0;
3189 }
3190
3191 int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
3192                                     struct extent_buffer *eb)
3193 {
3194         return wait_on_extent_writeback(tree, eb->start,
3195                                         eb->start + eb->len - 1);
3196 }
3197
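/*
 * mark every page of the buffer dirty.  Returns nonzero if the buffer was
 * already dirty before this call.
 */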
3198 int set_extent_buffer_dirty(struct extent_io_tree *tree,
3199                              struct extent_buffer *eb)
3200 {
3201         unsigned long i;
3202         unsigned long num_pages;
3203         int was_dirty = 0;
3204
3205         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3206         num_pages = num_extent_pages(eb->start, eb->len);
3207         for (i = 0; i < num_pages; i++)
3208                 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
3209         return was_dirty;
3210 }
3211
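/*
 * clear the uptodate bits on the buffer, in the io tree and on each of
 * its pages.
 */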
3212 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
3213                                 struct extent_buffer *eb)
3214 {
3215         unsigned long i;
3216         struct page *page;
3217         unsigned long num_pages;
3218
3219         num_pages = num_extent_pages(eb->start, eb->len);
3220         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3221
3222         clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3223                               GFP_NOFS);
3224         for (i = 0; i < num_pages; i++) {
3225                 page = extent_buffer_page(eb, i);
3226                 if (page)
3227                         ClearPageUptodate(page);
3228         }
3229         return 0;
3230 }
3231
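/*
 * mark the buffer uptodate.  Pages the buffer only partially covers (an
 * unaligned first or last page) are set uptodate via check_page_uptodate()
 * only when the whole page range is uptodate.
 */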
3232 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
3233                                 struct extent_buffer *eb)
3234 {
3235         unsigned long i;
3236         struct page *page;
3237         unsigned long num_pages;
3238
3239         num_pages = num_extent_pages(eb->start, eb->len);
3240
3241         set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
3242                             GFP_NOFS);
3243         for (i = 0; i < num_pages; i++) {
3244                 page = extent_buffer_page(eb, i);
3245                 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
3246                     ((i == num_pages - 1) &&
3247                      ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
3248                         check_page_uptodate(tree, page);
3249                         continue;
3250                 }
3251                 SetPageUptodate(page);
3252         }
3253         return 0;
3254 }
3255
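/*
 * return 1 if every byte in [start, end] is uptodate, either because the
 * io tree says so or because all pages in the range are uptodate.
 */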
3256 int extent_range_uptodate(struct extent_io_tree *tree,
3257                           u64 start, u64 end)
3258 {
3259         struct page *page;
3260         int ret;
3261         int pg_uptodate = 1;
3262         int uptodate;
3263         unsigned long index;
3264
3265         ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
3266         if (ret)
3267                 return 1;
3268         while (start <= end) {
3269                 index = start >> PAGE_CACHE_SHIFT;
3270                 page = find_get_page(tree->mapping, index);
3271                 uptodate = PageUptodate(page);
3272                 page_cache_release(page);
3273                 if (!uptodate) {
3274                         pg_uptodate = 0;
3275                         break;
3276                 }
3277                 start += PAGE_CACHE_SIZE;
3278         }
3279         return pg_uptodate;
3280 }
3281
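/*
 * return nonzero if the whole extent buffer is uptodate, checking the
 * buffer flag first, then the io tree, then the individual pages.
 */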
3282 int extent_buffer_uptodate(struct extent_io_tree *tree,
3283                            struct extent_buffer *eb)
3284 {
3285         int ret = 0;
3286         unsigned long num_pages;
3287         unsigned long i;
3288         struct page *page;
3289         int pg_uptodate = 1;
3290
3291         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3292                 return 1;
3293
3294         ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3295                            EXTENT_UPTODATE, 1);
3296         if (ret)
3297                 return ret;
3298
3299         num_pages = num_extent_pages(eb->start, eb->len);
3300         for (i = 0; i < num_pages; i++) {
3301                 page = extent_buffer_page(eb, i);
3302                 if (!PageUptodate(page)) {
3303                         pg_uptodate = 0;
3304                         break;
3305                 }
3306         }
3307         return pg_uptodate;
3308 }
3309
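/*
 * read the pages backing an extent buffer from disk if they are not
 * already uptodate.  'start' can restrict the read to the tail of the
 * buffer, and 'wait' controls whether we block until the IO completes.
 */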
3310 int read_extent_buffer_pages(struct extent_io_tree *tree,
3311                              struct extent_buffer *eb,
3312                              u64 start, int wait,
3313                              get_extent_t *get_extent, int mirror_num)
3314 {
3315         unsigned long i;
3316         unsigned long start_i;
3317         struct page *page;
3318         int err;
3319         int ret = 0;
3320         int locked_pages = 0;
3321         int all_uptodate = 1;
3322         int inc_all_pages = 0;
3323         unsigned long num_pages;
3324         struct bio *bio = NULL;
3325         unsigned long bio_flags = 0;
3326
3327         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3328                 return 0;
3329
3330         if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
3331                            EXTENT_UPTODATE, 1)) {
3332                 return 0;
3333         }
3334
3335         if (start) {
3336                 WARN_ON(start < eb->start);
3337                 start_i = (start >> PAGE_CACHE_SHIFT) -
3338                         (eb->start >> PAGE_CACHE_SHIFT);
3339         } else {
3340                 start_i = 0;
3341         }
3342
3343         num_pages = num_extent_pages(eb->start, eb->len);
3344         for (i = start_i; i < num_pages; i++) {
3345                 page = extent_buffer_page(eb, i);
3346                 if (!wait) {
3347                         if (!trylock_page(page))
3348                                 goto unlock_exit;
3349                 } else {
3350                         lock_page(page);
3351                 }
3352                 locked_pages++;
3353                 if (!PageUptodate(page))
3354                         all_uptodate = 0;
3355         }
3356         if (all_uptodate) {
3357                 if (start_i == 0)
3358                         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3359                 goto unlock_exit;
3360         }
3361
3362         for (i = start_i; i < num_pages; i++) {
3363                 page = extent_buffer_page(eb, i);
3364                 if (inc_all_pages)
3365                         page_cache_get(page);
3366                 if (!PageUptodate(page)) {
3367                         if (start_i == 0)
3368                                 inc_all_pages = 1;
3369                         ClearPageError(page);
3370                         err = __extent_read_full_page(tree, page,
3371                                                       get_extent, &bio,
3372                                                       mirror_num, &bio_flags);
3373                         if (err)
3374                                 ret = err;
3375                 } else {
3376                         unlock_page(page);
3377                 }
3378         }
3379
3380         if (bio)
3381                 submit_one_bio(READ, bio, mirror_num, bio_flags);
3382
3383         if (ret || !wait)
3384                 return ret;
3385
3386         for (i = start_i; i < num_pages; i++) {
3387                 page = extent_buffer_page(eb, i);
3388                 wait_on_page_locked(page);
3389                 if (!PageUptodate(page))
3390                         ret = -EIO;
3391         }
3392
3393         if (!ret)
3394                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3395         return ret;
3396
3397 unlock_exit:
3398         i = start_i;
3399         while (locked_pages > 0) {
3400                 page = extent_buffer_page(eb, i);
3401                 i++;
3402                 unlock_page(page);
3403                 locked_pages--;
3404         }
3405         return ret;
3406 }
3407
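/*
 * copy 'len' bytes at offset 'start' inside the extent buffer into the
 * flat destination buffer, one page at a time.
 */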
3408 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3409                         unsigned long start,
3410                         unsigned long len)
3411 {
3412         size_t cur;
3413         size_t offset;
3414         struct page *page;
3415         char *kaddr;
3416         char *dst = (char *)dstv;
3417         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3418         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3419
3420         WARN_ON(start > eb->len);
3421         WARN_ON(start + len > eb->start + eb->len);
3422
3423         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3424
3425         while (len > 0) {
3426                 page = extent_buffer_page(eb, i);
3427
3428                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3429                 kaddr = kmap_atomic(page, KM_USER1);
3430                 memcpy(dst, kaddr + offset, cur);
3431                 kunmap_atomic(kaddr, KM_USER1);
3432
3433                 dst += cur;
3434                 len -= cur;
3435                 offset = 0;
3436                 i++;
3437         }
3438 }
3439
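/*
 * kmap the single page holding [start, start + min_len) of the buffer and
 * return pointers into it.  Fails with -EINVAL if the range straddles a
 * page boundary.
 */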
3440 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
3441                                unsigned long min_len, char **token, char **map,
3442                                unsigned long *map_start,
3443                                unsigned long *map_len, int km)
3444 {
3445         size_t offset = start & (PAGE_CACHE_SIZE - 1);
3446         char *kaddr;
3447         struct page *p;
3448         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3449         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3450         unsigned long end_i = (start_offset + start + min_len - 1) >>
3451                 PAGE_CACHE_SHIFT;
3452
3453         if (i != end_i)
3454                 return -EINVAL;
3455
3456         if (i == 0) {
3457                 offset = start_offset;
3458                 *map_start = 0;
3459         } else {
3460                 offset = 0;
3461                 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
3462         }
3463
3464         if (start + min_len > eb->len) {
3465                 printk(KERN_ERR "btrfs bad mapping eb start %llu len %lu, "
3466                        "wanted %lu %lu\n", (unsigned long long)eb->start,
3467                        eb->len, start, min_len);
3468                 WARN_ON(1);
3469         }
3470
3471         p = extent_buffer_page(eb, i);
3472         kaddr = kmap_atomic(p, km);
3473         *token = kaddr;
3474         *map = kaddr + offset;
3475         *map_len = PAGE_CACHE_SIZE - offset;
3476         return 0;
3477 }
3478
3479 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
3480                       unsigned long min_len,
3481                       char **token, char **map,
3482                       unsigned long *map_start,
3483                       unsigned long *map_len, int km)
3484 {
3485         int err;
3486         int save = 0;
3487         if (eb->map_token) {
3488                 unmap_extent_buffer(eb, eb->map_token, km);
3489                 eb->map_token = NULL;
3490                 save = 1;
3491         }
3492         err = map_private_extent_buffer(eb, start, min_len, token, map,
3493                                        map_start, map_len, km);
3494         if (!err && save) {
3495                 eb->map_token = *token;
3496                 eb->kaddr = *map;
3497                 eb->map_start = *map_start;
3498                 eb->map_len = *map_len;
3499         }
3500         return err;
3501 }
3502
3503 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
3504 {
3505         kunmap_atomic(token, km);
3506 }
3507
3508 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3509                           unsigned long start,
3510                           unsigned long len)
3511 {
3512         size_t cur;
3513         size_t offset;
3514         struct page *page;
3515         char *kaddr;
3516         char *ptr = (char *)ptrv;
3517         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3518         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3519         int ret = 0;
3520
3521         WARN_ON(start > eb->len);
3522         WARN_ON(start + len > eb->start + eb->len);
3523
3524         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3525
3526         while (len > 0) {
3527                 page = extent_buffer_page(eb, i);
3528
3529                 cur = min(len, (PAGE_CACHE_SIZE - offset));
3530
3531                 kaddr = kmap_atomic(page, KM_USER0);
3532                 ret = memcmp(ptr, kaddr + offset, cur);
3533                 kunmap_atomic(kaddr, KM_USER0);
3534                 if (ret)
3535                         break;
3536
3537                 ptr += cur;
3538                 len -= cur;
3539                 offset = 0;
3540                 i++;
3541         }
3542         return ret;
3543 }
3544
3545 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
3546                          unsigned long start, unsigned long len)
3547 {
3548         size_t cur;
3549         size_t offset;
3550         struct page *page;
3551         char *kaddr;
3552         char *src = (char *)srcv;
3553         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3554         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3555
3556         WARN_ON(start > eb->len);
3557         WARN_ON(start + len > eb->start + eb->len);
3558
3559         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3560
3561         while (len > 0) {
3562                 page = extent_buffer_page(eb, i);
3563                 WARN_ON(!PageUptodate(page));
3564
3565                 cur = min(len, PAGE_CACHE_SIZE - offset);
3566                 kaddr = kmap_atomic(page, KM_USER1);
3567                 memcpy(kaddr + offset, src, cur);
3568                 kunmap_atomic(kaddr, KM_USER1);
3569
3570                 src += cur;
3571                 len -= cur;
3572                 offset = 0;
3573                 i++;
3574         }
3575 }
3576
3577 void memset_extent_buffer(struct extent_buffer *eb, char c,
3578                           unsigned long start, unsigned long len)
3579 {
3580         size_t cur;
3581         size_t offset;
3582         struct page *page;
3583         char *kaddr;
3584         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3585         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3586
3587         WARN_ON(start > eb->len);
3588         WARN_ON(start + len > eb->start + eb->len);
3589
3590         offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3591
3592         while (len > 0) {
3593                 page = extent_buffer_page(eb, i);
3594                 WARN_ON(!PageUptodate(page));
3595
3596                 cur = min(len, PAGE_CACHE_SIZE - offset);
3597                 kaddr = kmap_atomic(page, KM_USER0);
3598                 memset(kaddr + offset, c, cur);
3599                 kunmap_atomic(kaddr, KM_USER0);
3600
3601                 len -= cur;
3602                 offset = 0;
3603                 i++;
3604         }
3605 }
3606
3607 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3608                         unsigned long dst_offset, unsigned long src_offset,
3609                         unsigned long len)
3610 {
3611         u64 dst_len = dst->len;
3612         size_t cur;
3613         size_t offset;
3614         struct page *page;
3615         char *kaddr;
3616         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3617         unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3618
3619         WARN_ON(src->len != dst_len);
3620
3621         offset = (start_offset + dst_offset) &
3622                 ((unsigned long)PAGE_CACHE_SIZE - 1);
3623
3624         while (len > 0) {
3625                 page = extent_buffer_page(dst, i);
3626                 WARN_ON(!PageUptodate(page));
3627
3628                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3629
3630                 kaddr = kmap_atomic(page, KM_USER0);
3631                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3632                 kunmap_atomic(kaddr, KM_USER0);
3633
3634                 src_offset += cur;
3635                 len -= cur;
3636                 offset = 0;
3637                 i++;
3638         }
3639 }
3640
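/*
 * move 'len' bytes between two (possibly identical) pages for
 * memmove_extent_buffer().  The same-page case uses memmove(); otherwise
 * the bytes are copied back to front by hand.
 */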
3641 static void move_pages(struct page *dst_page, struct page *src_page,
3642                        unsigned long dst_off, unsigned long src_off,
3643                        unsigned long len)
3644 {
3645         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3646         if (dst_page == src_page) {
3647                 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3648         } else {
3649                 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3650                 char *p = dst_kaddr + dst_off + len;
3651                 char *s = src_kaddr + src_off + len;
3652
3653                 while (len--)
3654                         *--p = *--s;
3655
3656                 kunmap_atomic(src_kaddr, KM_USER1);
3657         }
3658         kunmap_atomic(dst_kaddr, KM_USER0);
3659 }
3660
3661 static void copy_pages(struct page *dst_page, struct page *src_page,
3662                        unsigned long dst_off, unsigned long src_off,
3663                        unsigned long len)
3664 {
3665         char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3666         char *src_kaddr;
3667
3668         if (dst_page != src_page)
3669                 src_kaddr = kmap_atomic(src_page, KM_USER1);
3670         else
3671                 src_kaddr = dst_kaddr;
3672
3673         memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3674         kunmap_atomic(dst_kaddr, KM_USER0);
3675         if (dst_page != src_page)
3676                 kunmap_atomic(src_kaddr, KM_USER1);
3677 }
3678
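/*
 * memcpy-style copy within a single extent buffer: move 'len' bytes from
 * src_offset to dst_offset, splitting the work at page boundaries.
 * Overlapping moves with dst_offset > src_offset must go through
 * memmove_extent_buffer() instead.
 */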
3679 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3680                            unsigned long src_offset, unsigned long len)
3681 {
3682         size_t cur;
3683         size_t dst_off_in_page;
3684         size_t src_off_in_page;
3685         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3686         unsigned long dst_i;
3687         unsigned long src_i;
3688
3689         if (src_offset + len > dst->len) {
3690                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3691                        "len %lu dst len %lu\n", src_offset, len, dst->len);
3692                 BUG_ON(1);
3693         }
3694         if (dst_offset + len > dst->len) {
3695                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3696                        "len %lu dst len %lu\n", dst_offset, len, dst->len);
3697                 BUG_ON(1);
3698         }
3699
3700         while (len > 0) {
3701                 dst_off_in_page = (start_offset + dst_offset) &
3702                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3703                 src_off_in_page = (start_offset + src_offset) &
3704                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3705
3706                 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3707                 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3708
3709                 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3710                                                src_off_in_page));
3711                 cur = min_t(unsigned long, cur,
3712                         (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3713
3714                 copy_pages(extent_buffer_page(dst, dst_i),
3715                            extent_buffer_page(dst, src_i),
3716                            dst_off_in_page, src_off_in_page, cur);
3717
3718                 src_offset += cur;
3719                 dst_offset += cur;
3720                 len -= cur;
3721         }
3722 }
3723
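/*
 * memmove-style copy within a single extent buffer.  Forward moves
 * (dst_offset < src_offset) are handed to memcpy_extent_buffer();
 * otherwise the range is copied back to front so overlapping bytes
 * survive.
 */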
3724 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3725                            unsigned long src_offset, unsigned long len)
3726 {
3727         size_t cur;
3728         size_t dst_off_in_page;
3729         size_t src_off_in_page;
3730         unsigned long dst_end = dst_offset + len - 1;
3731         unsigned long src_end = src_offset + len - 1;
3732         size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3733         unsigned long dst_i;
3734         unsigned long src_i;
3735
3736         if (src_offset + len > dst->len) {
3737                 printk(KERN_ERR "btrfs memmove bogus src_offset %lu move "
3738                        "len %lu len %lu\n", src_offset, len, dst->len);
3739                 BUG_ON(1);
3740         }
3741         if (dst_offset + len > dst->len) {
3742                 printk(KERN_ERR "btrfs memmove bogus dst_offset %lu move "
3743                        "len %lu len %lu\n", dst_offset, len, dst->len);
3744                 BUG_ON(1);
3745         }
3746         if (dst_offset < src_offset) {
3747                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3748                 return;
3749         }
3750         while (len > 0) {
3751                 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3752                 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3753
3754                 dst_off_in_page = (start_offset + dst_end) &
3755                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3756                 src_off_in_page = (start_offset + src_end) &
3757                         ((unsigned long)PAGE_CACHE_SIZE - 1);
3758
3759                 cur = min_t(unsigned long, len, src_off_in_page + 1);
3760                 cur = min(cur, dst_off_in_page + 1);
3761                 move_pages(extent_buffer_page(dst, dst_i),
3762                            extent_buffer_page(dst, src_i),
3763                            dst_off_in_page - cur + 1,
3764                            src_off_in_page - cur + 1, cur);
3765
3766                 dst_end -= cur;
3767                 src_end -= cur;
3768                 len -= cur;
3769         }
3770 }
3771
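/*
 * releasepage helper for extent buffer pages: if the buffer starting at
 * this page is clean and has no extra references, drop its page
 * references, remove it from the buffer tree and free it.  Returns 1 when
 * the page can be released.
 */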
3772 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3773 {
3774         u64 start = page_offset(page);
3775         struct extent_buffer *eb;
3776         int ret = 1;
3777         unsigned long i;
3778         unsigned long num_pages;
3779
3780         spin_lock(&tree->buffer_lock);
3781         eb = buffer_search(tree, start);
3782         if (!eb)
3783                 goto out;
3784
3785         if (atomic_read(&eb->refs) > 1) {
3786                 ret = 0;
3787                 goto out;
3788         }
3789         if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3790                 ret = 0;
3791                 goto out;
3792         }
3793         /* at this point we can safely release the extent buffer */
3794         num_pages = num_extent_pages(eb->start, eb->len);
3795         for (i = 0; i < num_pages; i++)
3796                 page_cache_release(extent_buffer_page(eb, i));
3797         rb_erase(&eb->rb_node, &tree->buffer);
3798         __free_extent_buffer(eb);
3799 out:
3800         spin_unlock(&tree->buffer_lock);
3801         return ret;
3802 }