#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include "extent_map.h"
/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));
static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(extent_buffers);
static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t extent_buffers_lock;
static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
static int nr_extent_buffers;
#define MAX_EXTENT_BUFFER_CACHE 128
struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};
void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
					      sizeof(struct extent_state), 0,
					      NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
					      sizeof(struct extent_buffer), 0,
					      NULL);
	spin_lock_init(&extent_buffers_lock);
}
void __exit extent_map_exit(void)
{
	struct extent_buffer *eb;
	struct extent_state *state;

	while (!list_empty(&extent_buffers)) {
		eb = list_entry(extent_buffers.next,
				struct extent_buffer, list);
		list_del(&eb->list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next,
				struct extent_buffer, leak_list);
		printk("buffer leak start %Lu len %lu return %lX\n", eb->start, eb->len, eb->alloc_addr);
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}
void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_map_tree_init);
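
/*
 * Usage sketch (illustrative only, not called from this file): a
 * filesystem embeds one tree per inode and initializes it once while
 * setting the inode up, e.g.
 *
 *	extent_map_tree_init(&tree, inode->i_mapping, GFP_NOFS);
 *
 * 'tree' and 'inode' here are hypothetical caller state.
 */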
struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);
struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);
void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while (prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;

	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}
static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}
/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n", prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);
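
/*
 * Illustrative sketch only (not used by this file): the expected
 * refcount discipline around add_extent_mapping.  The range and block
 * numbers are made up; on success the tree holds its own reference, so
 * the caller always drops the one from alloc_extent_map.
 */
static inline int __example_insert_mapping(struct extent_map_tree *tree)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->start = 0;
	em->end = 4095;
	em->block_start = 8192;
	em->block_end = em->block_start + 4095;
	ret = add_extent_mapping(tree, em);
	/* drop the alloc_extent_map reference; the tree keeps its own */
	free_extent_map(em);
	return ret;
}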
/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
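
/*
 * Illustrative sketch only: because lookup_extent_mapping may return a
 * mapping that covers just part of the range, callers typically loop
 * and advance past each object they get back.
 */
static inline void __example_walk_mappings(struct extent_map_tree *tree,
					   u64 start, u64 end)
{
	struct extent_map *em;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		/* ... use [em->start, em->end] here ... */
		start = em->end + 1;
		free_extent_map(em);
	}
}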
/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IO in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}
/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}
/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function.  They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}
/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);
static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);
/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);
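
/*
 * Illustrative sketch only: with 'exclusive' == 1, set_extent_bit
 * reports the start of a colliding range through 'failed_start', which
 * is how lock_extent() further down discovers contended regions.
 */
static inline int __example_try_lock(struct extent_map_tree *tree,
				     u64 start, u64 end)
{
	u64 failed_start;
	int err;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			     &failed_start, GFP_NOFS);
	if (err == -EEXIST) {
		/* [failed_start, end] is already locked by someone else */
		return -EAGAIN;
	}
	return err;
}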
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);
/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start,end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;

	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
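
/*
 * Illustrative sketch only: the typical pairing of lock_extent and
 * unlock_extent around work on an inclusive byte range.
 */
static inline void __example_locked_op(struct extent_map_tree *tree,
				       u64 start, u64 end)
{
	lock_extent(tree, start, end, GFP_NOFS);
	/* ... the range cannot be locked by anyone else here ... */
	unlock_extent(tree, start, end, GFP_NOFS);
}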
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);
/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);
int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->state & bits) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
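
/*
 * Illustrative sketch only, relying on the convention above that
 * find_first_extent_bit returns 0 when a matching range was found:
 * walking every EXTENT_DIRTY range at or after 'start'.
 */
static inline void __example_scan_dirty(struct extent_map_tree *tree,
					u64 start)
{
	u64 found_start;
	u64 found_end;

	while (find_first_extent_bit(tree, start, &found_start,
				     &found_end, EXTENT_DIRTY) == 0) {
		/* ... handle [found_start, found_end] ... */
		start = found_end + 1;
	}
}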
u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes = state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}
/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);
/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);
int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the tree
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);
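
/*
 * Illustrative sketch only: the difference between the two 'filled'
 * modes of test_range_bit.
 */
static inline int __example_range_checks(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	/* 1 only if every byte in [start, end] is uptodate */
	int all_uptodate = test_range_bit(tree, start, end,
					  EXTENT_UPTODATE, 1);
	/* 1 if any byte in [start, end] is locked */
	int any_locked = test_range_bit(tree, start, end,
					EXTENT_LOCKED, 0);

	return all_uptodate && !any_locked;
}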
/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}
/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}
/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}
/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}
static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      bio_end_io_t end_io_func)
{
	struct bio *bio;
	int ret = 0;

	bio = bio_alloc(GFP_NOIO, 1);

	bio->bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_io_vec[0].bv_page = page;
	bio->bi_io_vec[0].bv_len = size;
	bio->bi_io_vec[0].bv_offset = offset;

	bio->bi_vcnt = 1;
	bio->bi_idx = 0;
	bio->bi_size = size;

	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	bio_get(bio);
	submit_bio(rw, bio);

	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, 1);
		page_cache_get(page);
	}
}
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(extent_read_full_page);
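
/*
 * Usage sketch (comment only): a filesystem wires this into its
 * address_space_operations, passing its own get_extent_t callback,
 * e.g.
 *
 *	static int fs_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree = ...;	(per-inode tree)
 *		return extent_read_full_page(tree, page, fs_get_extent);
 *	}
 *
 * 'fs_readpage', 'fs_get_extent' and the tree lookup are hypothetical.
 */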
/*
 * the writepage semantics are similar to regular writepage.  extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback.  Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	lock_extent(tree, start, page_end, GFP_NOFS);
	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
					       &delalloc_end,
					       PAGE_CACHE_SIZE);
	if (nr_delalloc) {
		tree->ops->fill_delalloc(inode, start, delalloc_end);
		if (delalloc_end >= page_end + 1) {
			clear_extent_bit(tree, page_end + 1, delalloc_end,
					 EXTENT_LOCKED | EXTENT_DELALLOC,
					 1, 0, GFP_NOFS);
		}
		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
				 0, 0, GFP_NOFS);
		if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
			printk("found delalloc bits after clear extent_bit\n");
		}
	} else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after find_delalloc_range returns 0\n");
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			set_range_writeback(tree, cur, cur + iosize - 1);
			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}
EXPORT_SYMBOL(extent_write_full_page);
/*
 * basic invalidatepage code, this waits on any locked or writeback
 * ranges corresponding to the page, and then deletes any extent state
 * records from the tree
 */
int extent_invalidatepage(struct extent_map_tree *tree,
			  struct page *page, unsigned long offset)
{
	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
	u64 end = start + PAGE_CACHE_SIZE - 1;
	size_t blocksize = page->mapping->host->i_sb->s_blocksize;

	start += (offset + blocksize - 1) & ~(blocksize - 1);
	if (start > end)
		return 0;

	lock_extent(tree, start, end, GFP_NOFS);
	wait_on_extent_writeback(tree, start, end);
	clear_extent_bit(tree, start, end,
			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
			 1, 1, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(extent_invalidatepage);
/*
 * simple commit_write call, set_range_dirty is used to mark both
 * the pages and the extent records as dirty
 */
int extent_commit_write(struct extent_map_tree *tree,
			struct inode *inode, struct page *page,
			unsigned from, unsigned to)
{
	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

	set_page_extent_mapped(page);
	set_page_dirty(page);

	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}
	return 0;
}
EXPORT_SYMBOL(extent_commit_write);
int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em) {
			goto err;
		}
		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
					 sector, iosize, page_offset, em->bdev,
					 end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);
/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);
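
/*
 * Usage sketch (comment only): the releasepage address_space hook ends
 * up as a thin wrapper, e.g.
 *
 *	static int fs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct extent_map_tree *tree = ...;	(per-inode tree)
 *		return try_release_extent_mapping(tree, page);
 *	}
 *
 * 'fs_releasepage' and the tree lookup are hypothetical.
 */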
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	u64 start = (u64)iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		return 0;

	return (em->block_start + start - em->start) >> inode->i_blkbits;
}
static struct extent_buffer *__alloc_extent_buffer(gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&extent_buffers_lock);
	if (!list_empty(&extent_buffers)) {
		eb = list_entry(extent_buffers.next, struct extent_buffer,
				list);
		list_del(&eb->list);
		WARN_ON(nr_extent_buffers == 0);
		nr_extent_buffers--;
	}
	spin_unlock(&extent_buffers_lock);

	if (eb) {
		memset(eb, 0, sizeof(*eb));
	} else {
		eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	}
	spin_lock(&extent_buffers_lock);
	list_add(&eb->leak_list, &buffers);
	spin_unlock(&extent_buffers_lock);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	spin_lock(&extent_buffers_lock);
	list_del_init(&eb->leak_list);
	spin_unlock(&extent_buffers_lock);

	if (nr_extent_buffers >= MAX_EXTENT_BUFFER_CACHE) {
		kmem_cache_free(extent_buffer_cache, eb);
	} else {
		spin_lock(&extent_buffers_lock);
		list_add(&eb->list, &extent_buffers);
		nr_extent_buffers++;
		spin_unlock(&extent_buffers_lock);
	}
}
static inline struct page *extent_buffer_page(struct extent_buffer *eb, int i)
{
	struct page *p;

	if (i < EXTENT_INLINE_PAGES)
		return eb->pages[i];
	i += eb->start >> PAGE_CACHE_SHIFT;
	p = find_get_page(eb->pages[0]->mapping, i);
	/*
	 * alloc_extent_buffer took a reference on every page, so it is
	 * safe to drop the one from find_get_page right away
	 */
	page_cache_release(p);
	return p;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}
struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	eb->alloc_addr = (unsigned long)__builtin_return_address(0);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	for (i = 0; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			/* make sure the free only frees the pages we've
			 * grabbed a reference on
			 */
			eb->len = i << PAGE_CACHE_SHIFT;
			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
			goto fail;
		}
		set_page_extent_mapped(p);
		if (i < EXTENT_INLINE_PAGES)
			eb->pages[i] = p;
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	return eb;
fail:
	free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);
struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	eb->alloc_addr = (unsigned long)__builtin_return_address(0);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p) {
			/* make sure the free only frees the pages we've
			 * grabbed a reference on
			 */
			eb->len = i << PAGE_CACHE_SHIFT;
			eb->start &= ~((u64)PAGE_CACHE_SIZE - 1);
			goto fail;
		}
		set_page_extent_mapped(p);
		if (i < EXTENT_INLINE_PAGES)
			eb->pages[i] = p;
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	return eb;
fail:
	free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);
void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);
int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	return set_range_dirty(tree, eb->start, eb->start + eb->len - 1);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len - 1) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);
int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);

int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb, int wait)
{
	unsigned long i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page)) {
			continue;
		}
		if (TestSetPageLocked(page)) {
			wait_on_page_locked(page);
			continue;
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err) {
				ret = err;
			}
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait)
		return ret;

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
		}
	}
	eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
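
/*
 * Illustrative sketch only: a typical extent buffer read cycle,
 * relying on read_extent_buffer_pages returning 0 on success as
 * implemented above.  The start and length describe a hypothetical
 * metadata block.
 */
static inline struct extent_buffer *
__example_read_buffer(struct extent_map_tree *tree, u64 start,
		      unsigned long len)
{
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(tree, start, len, GFP_NOFS);
	if (!eb || IS_ERR(eb))
		return NULL;
	if (read_extent_buffer_pages(tree, eb, 1)) {
		free_extent_buffer(eb);
		return NULL;
	}
	return eb;
}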
void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
			WARN_ON(1);
		}
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);
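
/*
 * Illustrative sketch only: copying a fixed-size field out of an extent
 * buffer into a stack variable; 'offset' is a byte offset inside the
 * buffer.
 */
static inline u64 __example_read_u64(struct extent_buffer *eb,
				     unsigned long offset)
{
	u64 val;

	read_extent_buffer(eb, &val, offset, sizeof(val));
	return val;
}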
static int __map_extent_buffer(struct extent_buffer *eb, unsigned long start,
			       unsigned long min_len, char **token, char **map,
			       unsigned long *map_start,
			       unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (start >= eb->len) {
		printk("bad start in map eb start %Lu len %lu caller start %lu min %lu\n", eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;

	if (eb->map_token) {
		if (start >= eb->map_start &&
		    start + min_len <= eb->map_start + eb->map_len) {
			*token = eb->map_token;
			*map = eb->kaddr;
			*map_start = eb->map_start;
			*map_len = eb->map_len;
			return 0;
		}
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
	}
	err = __map_extent_buffer(eb, start, min_len, token, map,
				  map_start, map_len, km);
	if (!err) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);
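
/*
 * Illustrative sketch only: the write-side counterpart of the read
 * helper sketched above.
 */
static inline void __example_write_u64(struct extent_buffer *eb,
				       unsigned long offset, u64 val)
{
	write_extent_buffer(eb, &val, offset, sizeof(val));
}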
void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = start & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = dst_offset & ((unsigned long)PAGE_CACHE_SIZE - 1);
	if (i == 0)
		offset += start_offset;

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER1);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);
static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		/* copy backwards so overlapping ranges are safe */
		while (p > dst_kaddr + dst_off)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = dst_offset &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = src_offset &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		if (src_i == 0)
			src_off_in_page += start_offset;
		if (dst_i == 0)
			dst_off_in_page += start_offset;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min(cur, (unsigned long)(PAGE_CACHE_SIZE -
					       dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = dst_end &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = src_end &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		if (src_i == 0)
			src_off_in_page += start_offset;
		if (dst_i == 0)
			dst_off_in_page += start_offset;

		cur = min(len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);

		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);