1 #include <linux/bitops.h>
2 #include <linux/slab.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_map.h"
17 /* temporary define until extent_map moves out of btrfs */
18 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
19 unsigned long extra_flags,
20 void (*ctor)(void *, struct kmem_cache *,
23 static struct kmem_cache *extent_map_cache;
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
30 static DEFINE_SPINLOCK(state_lock);
31 #define BUFFER_LRU_MAX 64
37 struct rb_node rb_node;
40 struct extent_page_data {
42 struct extent_map_tree *tree;
43 get_extent_t *get_extent;
46 int __init extent_map_init(void)
48 extent_map_cache = btrfs_cache_create("extent_map",
49 sizeof(struct extent_map), 0,
51 if (!extent_map_cache)
53 extent_state_cache = btrfs_cache_create("extent_state",
54 sizeof(struct extent_state), 0,
56 if (!extent_state_cache)
58 extent_buffer_cache = btrfs_cache_create("extent_buffers",
59 sizeof(struct extent_buffer), 0,
61 if (!extent_buffer_cache)
62 goto free_state_cache;
66 kmem_cache_destroy(extent_state_cache);
68 kmem_cache_destroy(extent_map_cache);
72 void extent_map_exit(void)
74 struct extent_state *state;
76 while (!list_empty(&states)) {
77 state = list_entry(states.next, struct extent_state, list);
78 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n", state->start, state->end, state->state, state->in_tree, atomic_read(&state->refs));
79 list_del(&state->list);
80 kmem_cache_free(extent_state_cache, state);
85 kmem_cache_destroy(extent_map_cache);
86 if (extent_state_cache)
87 kmem_cache_destroy(extent_state_cache);
88 if (extent_buffer_cache)
89 kmem_cache_destroy(extent_buffer_cache);
92 void extent_map_tree_init(struct extent_map_tree *tree,
93 struct address_space *mapping, gfp_t mask)
95 tree->map.rb_node = NULL;
96 tree->state.rb_node = NULL;
98 tree->dirty_bytes = 0;
99 rwlock_init(&tree->lock);
100 spin_lock_init(&tree->lru_lock);
101 tree->mapping = mapping;
102 INIT_LIST_HEAD(&tree->buffer_lru);
105 EXPORT_SYMBOL(extent_map_tree_init);
107 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
109 struct extent_buffer *eb;
110 while (!list_empty(&tree->buffer_lru)) {
111 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
113 list_del_init(&eb->lru);
114 free_extent_buffer(eb);
117 EXPORT_SYMBOL(extent_map_tree_empty_lru);
119 struct extent_map *alloc_extent_map(gfp_t mask)
121 struct extent_map *em;
122 em = kmem_cache_alloc(extent_map_cache, mask);
123 if (!em || IS_ERR(em))
126 atomic_set(&em->refs, 1);
129 EXPORT_SYMBOL(alloc_extent_map);
131 void free_extent_map(struct extent_map *em)
135 if (atomic_dec_and_test(&em->refs)) {
136 WARN_ON(em->in_tree);
137 kmem_cache_free(extent_map_cache, em);
140 EXPORT_SYMBOL(free_extent_map);
143 struct extent_state *alloc_extent_state(gfp_t mask)
145 struct extent_state *state;
148 state = kmem_cache_alloc(extent_state_cache, mask);
149 if (!state || IS_ERR(state))
155 spin_lock_irqsave(&state_lock, flags);
156 list_add(&state->list, &states);
157 spin_unlock_irqrestore(&state_lock, flags);
159 atomic_set(&state->refs, 1);
160 init_waitqueue_head(&state->wq);
163 EXPORT_SYMBOL(alloc_extent_state);
165 void free_extent_state(struct extent_state *state)
170 if (atomic_dec_and_test(&state->refs)) {
171 WARN_ON(state->in_tree);
172 spin_lock_irqsave(&state_lock, flags);
173 list_del(&state->list);
174 spin_unlock_irqrestore(&state_lock, flags);
175 kmem_cache_free(extent_state_cache, state);
178 EXPORT_SYMBOL(free_extent_state);
180 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
181 struct rb_node *node)
183 struct rb_node **p = &root->rb_node;
184 struct rb_node *parent = NULL;
185 struct tree_entry *entry;
189 entry = rb_entry(parent, struct tree_entry, rb_node);
191 if (offset < entry->start)
193 else if (offset > entry->end)
199 entry = rb_entry(node, struct tree_entry, rb_node);
201 rb_link_node(node, parent, p);
202 rb_insert_color(node, root);
206 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
207 struct rb_node **prev_ret)
209 struct rb_node *n = root->rb_node;
210 struct rb_node *prev = NULL;
211 struct tree_entry *entry;
212 struct tree_entry *prev_entry = NULL;
215 entry = rb_entry(n, struct tree_entry, rb_node);
219 if (offset < entry->start)
221 else if (offset > entry->end)
228 while (prev && offset > prev_entry->end) {
229 prev = rb_next(prev);
230 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
236 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
238 struct rb_node *prev;
240 ret = __tree_search(root, offset, &prev);
246 static int tree_delete(struct rb_root *root, u64 offset)
248 struct rb_node *node;
249 struct tree_entry *entry;
251 node = __tree_search(root, offset, NULL);
254 entry = rb_entry(node, struct tree_entry, rb_node);
256 rb_erase(node, root);
261 * add_extent_mapping tries a simple backward merge with existing
262 * mappings. The extent_map struct passed in will be inserted into
263 * the tree directly (no copies made, just a reference taken).
265 int add_extent_mapping(struct extent_map_tree *tree,
266 struct extent_map *em)
269 struct extent_map *prev = NULL;
272 write_lock_irq(&tree->lock);
273 rb = tree_insert(&tree->map, em->end, &em->rb_node);
275 prev = rb_entry(rb, struct extent_map, rb_node);
279 atomic_inc(&em->refs);
280 if (em->start != 0) {
281 rb = rb_prev(&em->rb_node);
283 prev = rb_entry(rb, struct extent_map, rb_node);
284 if (prev && prev->end + 1 == em->start &&
285 ((em->block_start == EXTENT_MAP_HOLE &&
286 prev->block_start == EXTENT_MAP_HOLE) ||
287 (em->block_start == EXTENT_MAP_INLINE &&
288 prev->block_start == EXTENT_MAP_INLINE) ||
289 (em->block_start == EXTENT_MAP_DELALLOC &&
290 prev->block_start == EXTENT_MAP_DELALLOC) ||
291 (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
292 em->block_start == prev->block_end + 1))) {
293 em->start = prev->start;
294 em->block_start = prev->block_start;
295 rb_erase(&prev->rb_node, &tree->map);
297 free_extent_map(prev);
301 write_unlock_irq(&tree->lock);
304 EXPORT_SYMBOL(add_extent_mapping);
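/*
 * Illustrative sketch of a typical caller (the example_* name and the
 * field values here are hypothetical, not part of the original code):
 * build an extent_map, insert it, then drop the local reference.
 * add_extent_mapping() takes its own reference for the tree, so the
 * mapping stays alive after free_extent_map() below.
 */
static inline int example_insert_hole_mapping(struct extent_map_tree *tree,
					      u64 start, u64 end)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->end = end;
	em->block_start = EXTENT_MAP_HOLE;
	em->block_end = EXTENT_MAP_HOLE;

	ret = add_extent_mapping(tree, em);
	/* drop the caller's reference, the tree still holds one on success */
	free_extent_map(em);
	return ret;
}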
307 * lookup_extent_mapping returns the first extent_map struct in the
308 * tree that intersects the [start, end] (inclusive) range. There may
309 * be additional objects in the tree that intersect, so check the object
310 * returned carefully to make sure you don't need additional lookups.
312 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
315 struct extent_map *em;
316 struct rb_node *rb_node;
318 read_lock_irq(&tree->lock);
319 rb_node = tree_search(&tree->map, start);
324 if (IS_ERR(rb_node)) {
325 em = ERR_PTR(PTR_ERR(rb_node));
328 em = rb_entry(rb_node, struct extent_map, rb_node);
329 if (em->end < start || em->start > end) {
333 atomic_inc(&em->refs);
335 read_unlock_irq(&tree->lock);
338 EXPORT_SYMBOL(lookup_extent_mapping);
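/*
 * Illustrative sketch (example_* is a hypothetical helper): the mapping
 * returned by lookup_extent_mapping() is only guaranteed to intersect
 * [start, end], so callers check the range themselves and must drop the
 * reference with free_extent_map() when they are done.
 */
static inline u64 example_block_for_offset(struct extent_map_tree *tree,
					   u64 offset)
{
	struct extent_map *em;
	u64 block = 0;

	em = lookup_extent_mapping(tree, offset, offset);
	if (!em || IS_ERR(em))
		return 0;
	if (em->block_start != EXTENT_MAP_HOLE &&
	    em->block_start != EXTENT_MAP_INLINE &&
	    em->block_start != EXTENT_MAP_DELALLOC)
		block = em->block_start + (offset - em->start);
	free_extent_map(em);
	return block;
}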
341 * removes an extent_map struct from the tree. No reference counts are
342 * dropped, and no checks are done to see if the range is in use
344 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
348 write_lock_irq(&tree->lock);
349 ret = tree_delete(&tree->map, em->end);
350 write_unlock_irq(&tree->lock);
353 EXPORT_SYMBOL(remove_extent_mapping);
356 * utility function to look for merge candidates inside a given range.
357 * Any extents with matching state are merged together into a single
358 * extent in the tree. Extents with EXTENT_IOBITS in their state field
359 * are not merged because the end_io handlers need to be able to do
360 * operations on them without sleeping (or doing allocations/splits).
362 * This should be called with the tree lock held.
364 static int merge_state(struct extent_map_tree *tree,
365 struct extent_state *state)
367 struct extent_state *other;
368 struct rb_node *other_node;
370 if (state->state & EXTENT_IOBITS)
373 other_node = rb_prev(&state->rb_node);
375 other = rb_entry(other_node, struct extent_state, rb_node);
376 if (other->end == state->start - 1 &&
377 other->state == state->state) {
378 state->start = other->start;
380 rb_erase(&other->rb_node, &tree->state);
381 free_extent_state(other);
384 other_node = rb_next(&state->rb_node);
386 other = rb_entry(other_node, struct extent_state, rb_node);
387 if (other->start == state->end + 1 &&
388 other->state == state->state) {
389 other->start = state->start;
391 rb_erase(&state->rb_node, &tree->state);
392 free_extent_state(state);
399 * insert an extent_state struct into the tree. 'bits' are set on the
400 * struct before it is inserted.
402 * This may return -EEXIST if the extent is already there, in which case the
403 * state struct is freed.
405 * The tree lock is not taken internally. This is a utility function and
406 * probably isn't what you want to call (see set/clear_extent_bit).
408 static int insert_state(struct extent_map_tree *tree,
409 struct extent_state *state, u64 start, u64 end,
412 struct rb_node *node;
415 printk("end < start %Lu %Lu\n", end, start);
418 if (bits & EXTENT_DIRTY)
419 tree->dirty_bytes += end - start + 1;
420 state->state |= bits;
421 state->start = start;
423 node = tree_insert(&tree->state, end, &state->rb_node);
425 struct extent_state *found;
426 found = rb_entry(node, struct extent_state, rb_node);
427 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, start, end);
428 free_extent_state(state);
431 merge_state(tree, state);
436 * split a given extent state struct in two, inserting the preallocated
437 * struct 'prealloc' as the newly created first half. 'split' indicates an
438 * offset inside 'orig' where it should be split.
441 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
442 * are two extent state structs in the tree:
443 * prealloc: [orig->start, split - 1]
444 * orig: [ split, orig->end ]
446 * The tree locks are not taken by this function. They need to be held
449 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
450 struct extent_state *prealloc, u64 split)
452 struct rb_node *node;
453 prealloc->start = orig->start;
454 prealloc->end = split - 1;
455 prealloc->state = orig->state;
458 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
460 struct extent_state *found;
461 found = rb_entry(node, struct extent_state, rb_node);
462 printk("found node %Lu %Lu on insert of %Lu %Lu\n", found->start, found->end, prealloc->start, prealloc->end);
463 free_extent_state(prealloc);
470 * utility function to clear some bits in an extent state struct.
471 * It will optionally wake up anyone waiting on this state (wake == 1), or
472 * forcibly remove the state from the tree (delete == 1).
474 * If no bits are set on the state struct after clearing things, the
475 * struct is freed and removed from the tree
477 static int clear_state_bit(struct extent_map_tree *tree,
478 struct extent_state *state, int bits, int wake,
481 int ret = state->state & bits;
483 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
484 u64 range = state->end - state->start + 1;
485 WARN_ON(range > tree->dirty_bytes);
486 tree->dirty_bytes -= range;
488 state->state &= ~bits;
491 if (delete || state->state == 0) {
492 if (state->in_tree) {
493 rb_erase(&state->rb_node, &tree->state);
495 free_extent_state(state);
500 merge_state(tree, state);
506 * clear some bits on a range in the tree. This may require splitting
507 * or inserting elements in the tree, so the gfp mask is used to
508 * indicate which allocations or sleeping are allowed.
510 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
511 * the given range from the tree regardless of state (ie for truncate).
513 * the range [start, end] is inclusive.
515 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
516 * bits were already set, or zero if none of the bits were already set.
518 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
519 int bits, int wake, int delete, gfp_t mask)
521 struct extent_state *state;
522 struct extent_state *prealloc = NULL;
523 struct rb_node *node;
529 if (!prealloc && (mask & __GFP_WAIT)) {
530 prealloc = alloc_extent_state(mask);
535 write_lock_irqsave(&tree->lock, flags);
537 * this search will find the extents that end after
540 node = tree_search(&tree->state, start);
543 state = rb_entry(node, struct extent_state, rb_node);
544 if (state->start > end)
546 WARN_ON(state->end < start);
549 * | ---- desired range ---- |
551 * | ------------- state -------------- |
553 * We need to split the extent we found, and may flip
554 * bits on the second half.
556 * If the extent we found extends past our range, we
557 * just split and search again. It'll get split again
558 * the next time though.
560 * If the extent we found is inside our range, we clear
561 * the desired bit on it.
564 if (state->start < start) {
565 err = split_state(tree, state, prealloc, start);
566 BUG_ON(err == -EEXIST);
570 if (state->end <= end) {
571 start = state->end + 1;
572 set |= clear_state_bit(tree, state, bits,
575 start = state->start;
580 * | ---- desired range ---- |
582 * We need to split the extent, and clear the bit
585 if (state->start <= end && state->end > end) {
586 err = split_state(tree, state, prealloc, end + 1);
587 BUG_ON(err == -EEXIST);
591 set |= clear_state_bit(tree, prealloc, bits,
597 start = state->end + 1;
598 set |= clear_state_bit(tree, state, bits, wake, delete);
602 write_unlock_irqrestore(&tree->lock, flags);
604 free_extent_state(prealloc);
611 write_unlock_irqrestore(&tree->lock, flags);
612 if (mask & __GFP_WAIT)
616 EXPORT_SYMBOL(clear_extent_bit);
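/*
 * Illustrative sketch (hypothetical helper): a truncate-style caller that
 * wants the range gone regardless of which bits are set passes wake == 1
 * and delete == 1, as described above.
 */
static inline int example_forget_range(struct extent_map_tree *tree,
				       u64 start, u64 end)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
				1, 1, GFP_NOFS);
}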
618 static int wait_on_state(struct extent_map_tree *tree,
619 struct extent_state *state)
622 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
623 read_unlock_irq(&tree->lock);
625 read_lock_irq(&tree->lock);
626 finish_wait(&state->wq, &wait);
631 * waits for one or more bits to clear on a range in the state tree.
632 * The range [start, end] is inclusive.
633 * The tree lock is taken by this function
635 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
637 struct extent_state *state;
638 struct rb_node *node;
640 read_lock_irq(&tree->lock);
644 * this search will find all the extents that end after
647 node = tree_search(&tree->state, start);
651 state = rb_entry(node, struct extent_state, rb_node);
653 if (state->start > end)
656 if (state->state & bits) {
657 start = state->start;
658 atomic_inc(&state->refs);
659 wait_on_state(tree, state);
660 free_extent_state(state);
663 start = state->end + 1;
668 if (need_resched()) {
669 read_unlock_irq(&tree->lock);
671 read_lock_irq(&tree->lock);
675 read_unlock_irq(&tree->lock);
678 EXPORT_SYMBOL(wait_extent_bit);
680 static void set_state_bits(struct extent_map_tree *tree,
681 struct extent_state *state,
684 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
685 u64 range = state->end - state->start + 1;
686 tree->dirty_bytes += range;
688 state->state |= bits;
692 * set some bits on a range in the tree. This may require allocations
693 * or sleeping, so the gfp mask is used to indicate what is allowed.
695 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
696 * range already has the desired bits set. The start of the existing
697 * range is returned in failed_start in this case.
699 * [start, end] is inclusive
700 * This takes the tree lock.
702 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
703 int exclusive, u64 *failed_start, gfp_t mask)
705 struct extent_state *state;
706 struct extent_state *prealloc = NULL;
707 struct rb_node *node;
714 if (!prealloc && (mask & __GFP_WAIT)) {
715 prealloc = alloc_extent_state(mask);
720 write_lock_irqsave(&tree->lock, flags);
722 * this search will find all the extents that end after
725 node = tree_search(&tree->state, start);
727 err = insert_state(tree, prealloc, start, end, bits);
729 BUG_ON(err == -EEXIST);
733 state = rb_entry(node, struct extent_state, rb_node);
734 last_start = state->start;
735 last_end = state->end;
738 * | ---- desired range ---- |
741 * Just lock what we found and keep going
743 if (state->start == start && state->end <= end) {
744 set = state->state & bits;
745 if (set && exclusive) {
746 *failed_start = state->start;
750 set_state_bits(tree, state, bits);
751 start = state->end + 1;
752 merge_state(tree, state);
757 * | ---- desired range ---- |
760 * | ------------- state -------------- |
762 * We need to split the extent we found, and may flip bits on
765 * If the extent we found extends past our
766 * range, we just split and search again. It'll get split
767 * again the next time though.
769 * If the extent we found is inside our range, we set the
772 if (state->start < start) {
773 set = state->state & bits;
774 if (exclusive && set) {
775 *failed_start = start;
779 err = split_state(tree, state, prealloc, start);
780 BUG_ON(err == -EEXIST);
784 if (state->end <= end) {
785 set_state_bits(tree, state, bits);
786 start = state->end + 1;
787 merge_state(tree, state);
789 start = state->start;
794 * | ---- desired range ---- |
795 * | state | or | state |
797 * There's a hole, we need to insert something in it and
798 * ignore the extent we found.
800 if (state->start > start) {
802 if (end < last_start)
805 this_end = last_start - 1;
806 err = insert_state(tree, prealloc, start, this_end,
809 BUG_ON(err == -EEXIST);
812 start = this_end + 1;
816 * | ---- desired range ---- |
818 * We need to split the extent, and set the bit
821 if (state->start <= end && state->end > end) {
822 set = state->state & bits;
823 if (exclusive && set) {
824 *failed_start = start;
828 err = split_state(tree, state, prealloc, end + 1);
829 BUG_ON(err == -EEXIST);
831 set_state_bits(tree, prealloc, bits);
832 merge_state(tree, prealloc);
840 write_unlock_irqrestore(&tree->lock, flags);
842 free_extent_state(prealloc);
849 write_unlock_irqrestore(&tree->lock, flags);
850 if (mask & __GFP_WAIT)
854 EXPORT_SYMBOL(set_extent_bit);
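/*
 * Illustrative sketch (hypothetical helper): an exclusive caller that does
 * not retry.  On -EEXIST the start of the conflicting range is reported in
 * failed_start; lock_extent() below is the sleeping, retrying version of
 * this pattern.
 */
static inline int example_try_lock_extent(struct extent_map_tree *tree,
					  u64 start, u64 end)
{
	u64 failed_start;

	return set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			      &failed_start, GFP_NOFS);
}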
856 /* wrappers around set/clear extent bit */
857 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
860 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
863 EXPORT_SYMBOL(set_extent_dirty);
865 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
866 int bits, gfp_t mask)
868 return set_extent_bit(tree, start, end, bits, 0, NULL,
871 EXPORT_SYMBOL(set_extent_bits);
873 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
874 int bits, gfp_t mask)
876 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
878 EXPORT_SYMBOL(clear_extent_bits);
880 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
883 return set_extent_bit(tree, start, end,
884 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
887 EXPORT_SYMBOL(set_extent_delalloc);
889 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
892 return clear_extent_bit(tree, start, end,
893 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
895 EXPORT_SYMBOL(clear_extent_dirty);
897 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
900 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
903 EXPORT_SYMBOL(set_extent_new);
905 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
908 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
910 EXPORT_SYMBOL(clear_extent_new);
912 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
915 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
918 EXPORT_SYMBOL(set_extent_uptodate);
920 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
923 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
925 EXPORT_SYMBOL(clear_extent_uptodate);
927 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
930 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
933 EXPORT_SYMBOL(set_extent_writeback);
935 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
938 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
940 EXPORT_SYMBOL(clear_extent_writeback);
942 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
944 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
946 EXPORT_SYMBOL(wait_on_extent_writeback);
949 * locks a range in ascending order, waiting for any locked regions
950 * it hits on the way. [start,end] are inclusive, and this will sleep.
952 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
957 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
958 &failed_start, mask);
959 if (err == -EEXIST && (mask & __GFP_WAIT)) {
960 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
961 start = failed_start;
965 WARN_ON(start > end);
969 EXPORT_SYMBOL(lock_extent);
971 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
974 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
976 EXPORT_SYMBOL(unlock_extent);
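/*
 * Illustrative sketch (hypothetical helper): the usual pattern is to take
 * the range lock, update the state bits, then unlock; this mirrors what the
 * readpage path below does around real I/O.
 */
static inline void example_mark_range_uptodate(struct extent_map_tree *tree,
					       u64 start, u64 end)
{
	lock_extent(tree, start, end, GFP_NOFS);
	set_extent_uptodate(tree, start, end, GFP_NOFS);
	unlock_extent(tree, start, end, GFP_NOFS);
}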
979 * helper function to set pages and extents in the tree dirty
981 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
983 unsigned long index = start >> PAGE_CACHE_SHIFT;
984 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
987 while (index <= end_index) {
988 page = find_get_page(tree->mapping, index);
990 __set_page_dirty_nobuffers(page);
991 page_cache_release(page);
994 set_extent_dirty(tree, start, end, GFP_NOFS);
997 EXPORT_SYMBOL(set_range_dirty);
1000 * helper function to set both pages and extents in the tree writeback
1002 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
1004 unsigned long index = start >> PAGE_CACHE_SHIFT;
1005 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1008 while (index <= end_index) {
1009 page = find_get_page(tree->mapping, index);
1011 set_page_writeback(page);
1012 page_cache_release(page);
1015 set_extent_writeback(tree, start, end, GFP_NOFS);
1018 EXPORT_SYMBOL(set_range_writeback);
1020 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
1021 u64 *start_ret, u64 *end_ret, int bits)
1023 struct rb_node *node;
1024 struct extent_state *state;
1027 read_lock_irq(&tree->lock);
1029 * this search will find all the extents that end after
1032 node = tree_search(&tree->state, start);
1033 if (!node || IS_ERR(node)) {
1038 state = rb_entry(node, struct extent_state, rb_node);
1039 if (state->end >= start && (state->state & bits)) {
1040 *start_ret = state->start;
1041 *end_ret = state->end;
1045 node = rb_next(node);
1050 read_unlock_irq(&tree->lock);
1053 EXPORT_SYMBOL(find_first_extent_bit);
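/*
 * Illustrative sketch (hypothetical helper, assuming the usual convention
 * that find_first_extent_bit() returns 0 when a range was found): walk
 * every range carrying a given bit, starting from byte zero.
 */
static inline void example_walk_dirty_ranges(struct extent_map_tree *tree)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;

	while (!find_first_extent_bit(tree, start, &found_start,
				      &found_end, EXTENT_DIRTY)) {
		/* ... process [found_start, found_end] here ... */
		start = found_end + 1;
	}
}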
1055 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1056 u64 *start, u64 *end, u64 max_bytes)
1058 struct rb_node *node;
1059 struct extent_state *state;
1060 u64 cur_start = *start;
1062 u64 total_bytes = 0;
1064 write_lock_irq(&tree->lock);
1066 * this search will find all the extents that end after
1070 node = tree_search(&tree->state, cur_start);
1071 if (!node || IS_ERR(node)) {
1077 state = rb_entry(node, struct extent_state, rb_node);
1078 if (found && state->start != cur_start) {
1081 if (!(state->state & EXTENT_DELALLOC)) {
1087 struct extent_state *prev_state;
1088 struct rb_node *prev_node = node;
1090 prev_node = rb_prev(prev_node);
1093 prev_state = rb_entry(prev_node,
1094 struct extent_state,
1096 if (!(prev_state->state & EXTENT_DELALLOC))
1102 if (state->state & EXTENT_LOCKED) {
1104 atomic_inc(&state->refs);
1105 prepare_to_wait(&state->wq, &wait,
1106 TASK_UNINTERRUPTIBLE);
1107 write_unlock_irq(&tree->lock);
1109 write_lock_irq(&tree->lock);
1110 finish_wait(&state->wq, &wait);
1111 free_extent_state(state);
1114 state->state |= EXTENT_LOCKED;
1116 *start = state->start;
1119 cur_start = state->end + 1;
1120 node = rb_next(node);
1123 total_bytes += state->end - state->start + 1;
1124 if (total_bytes >= max_bytes)
1128 write_unlock_irq(&tree->lock);
1132 u64 count_range_bits(struct extent_map_tree *tree,
1133 u64 *start, u64 search_end, u64 max_bytes,
1136 struct rb_node *node;
1137 struct extent_state *state;
1138 u64 cur_start = *start;
1139 u64 total_bytes = 0;
1142 if (search_end <= cur_start) {
1143 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1148 write_lock_irq(&tree->lock);
1149 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1150 total_bytes = tree->dirty_bytes;
1154 * this search will find all the extents that end after
1157 node = tree_search(&tree->state, cur_start);
1158 if (!node || IS_ERR(node)) {
1163 state = rb_entry(node, struct extent_state, rb_node);
1164 if (state->start > search_end)
1166 if (state->end >= cur_start && (state->state & bits)) {
1167 total_bytes += min(search_end, state->end) + 1 -
1168 max(cur_start, state->start);
1169 if (total_bytes >= max_bytes)
1172 *start = state->start;
1176 node = rb_next(node);
1181 write_unlock_irq(&tree->lock);
1185 * helper function to lock both pages and extents in the tree.
1186 * pages must be locked first.
1188 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1190 unsigned long index = start >> PAGE_CACHE_SHIFT;
1191 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1195 while (index <= end_index) {
1196 page = grab_cache_page(tree->mapping, index);
1202 err = PTR_ERR(page);
1207 lock_extent(tree, start, end, GFP_NOFS);
1212 * we failed above in getting the page at 'index', so we undo here
1213 * up to but not including the page at 'index'
1216 index = start >> PAGE_CACHE_SHIFT;
1217 while (index < end_index) {
1218 page = find_get_page(tree->mapping, index);
1220 page_cache_release(page);
1225 EXPORT_SYMBOL(lock_range);
1228 * helper function to unlock both pages and extents in the tree.
1230 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1232 unsigned long index = start >> PAGE_CACHE_SHIFT;
1233 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1236 while (index <= end_index) {
1237 page = find_get_page(tree->mapping, index);
1239 page_cache_release(page);
1242 unlock_extent(tree, start, end, GFP_NOFS);
1245 EXPORT_SYMBOL(unlock_range);
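/*
 * Illustrative sketch (hypothetical helper): lock_range()/unlock_range()
 * bracket an operation that needs both the page locks and the extent locks
 * for a byte range; the body between them is a placeholder.
 */
static inline int example_with_range_locked(struct extent_map_tree *tree,
					    u64 start, u64 end)
{
	int ret;

	ret = lock_range(tree, start, end);
	if (ret)
		return ret;
	/* ... operate on [start, end] with pages and extents locked ... */
	return unlock_range(tree, start, end);
}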
1247 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1249 struct rb_node *node;
1250 struct extent_state *state;
1253 write_lock_irq(&tree->lock);
1255 * this search will find all the extents that end after
1258 node = tree_search(&tree->state, start);
1259 if (!node || IS_ERR(node)) {
1263 state = rb_entry(node, struct extent_state, rb_node);
1264 if (state->start != start) {
1268 state->private = private;
1270 write_unlock_irq(&tree->lock);
1274 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1276 struct rb_node *node;
1277 struct extent_state *state;
1280 read_lock_irq(&tree->lock);
1282 * this search will find all the extents that end after
1285 node = tree_search(&tree->state, start);
1286 if (!node || IS_ERR(node)) {
1290 state = rb_entry(node, struct extent_state, rb_node);
1291 if (state->start != start) {
1295 *private = state->private;
1297 read_unlock_irq(&tree->lock);
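/*
 * Illustrative sketch (hypothetical helper, arbitrary value): the private
 * field is keyed by the exact start offset of an existing extent state
 * record, so set_state_private() and get_state_private() must be called
 * with the same 'start'.
 */
static inline int example_stash_private(struct extent_map_tree *tree,
					u64 start, u64 *out)
{
	int ret;

	ret = set_state_private(tree, start, 12345);
	if (ret)
		return ret;
	return get_state_private(tree, start, out);
}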
1302 * searches a range in the state tree for a given mask.
1303 * If 'filled' == 1, this returns 1 only if every extent in the tree
1304 * has the bits set. Otherwise, 1 is returned if any bit in the
1305 * range is found set.
1307 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1308 int bits, int filled)
1310 struct extent_state *state = NULL;
1311 struct rb_node *node;
1314 read_lock_irq(&tree->lock);
1315 node = tree_search(&tree->state, start);
1316 while (node && start <= end) {
1317 state = rb_entry(node, struct extent_state, rb_node);
1319 if (filled && state->start > start) {
1324 if (state->start > end)
1327 if (state->state & bits) {
1331 } else if (filled) {
1335 start = state->end + 1;
1338 node = rb_next(node);
1340 read_unlock_irq(&tree->lock);
1343 EXPORT_SYMBOL(test_range_bit);
1346 * helper function to set a given page up to date if all the
1347 * extents in the tree for that page are up to date
1349 static int check_page_uptodate(struct extent_map_tree *tree,
1352 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1353 u64 end = start + PAGE_CACHE_SIZE - 1;
1354 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1355 SetPageUptodate(page);
1360 * helper function to unlock a page if all the extents in the tree
1361 * for that page are unlocked
1363 static int check_page_locked(struct extent_map_tree *tree,
1366 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1367 u64 end = start + PAGE_CACHE_SIZE - 1;
1368 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1374 * helper function to end page writeback if all the extents
1375 * in the tree for that page are done with writeback
1377 static int check_page_writeback(struct extent_map_tree *tree,
1380 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1381 u64 end = start + PAGE_CACHE_SIZE - 1;
1382 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1383 end_page_writeback(page);
1387 /* lots and lots of room for performance fixes in the end_bio funcs */
1390 * after a writepage IO is done, we need to:
1391 * clear the uptodate bits on error
1392 * clear the writeback bits in the extent tree for this IO
1393 * end_page_writeback if the page has no more pending IO
1395 * Scheduling is not allowed, so the extent state tree is expected
1396 * to have one and only one object corresponding to this IO.
1398 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1399 static void end_bio_extent_writepage(struct bio *bio, int err)
1401 static int end_bio_extent_writepage(struct bio *bio,
1402 unsigned int bytes_done, int err)
1405 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1406 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1407 struct extent_map_tree *tree = bio->bi_private;
1412 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1418 struct page *page = bvec->bv_page;
1419 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1421 end = start + bvec->bv_len - 1;
1423 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1428 if (--bvec >= bio->bi_io_vec)
1429 prefetchw(&bvec->bv_page->flags);
1432 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1433 ClearPageUptodate(page);
1436 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1439 end_page_writeback(page);
1441 check_page_writeback(tree, page);
1442 if (tree->ops && tree->ops->writepage_end_io_hook)
1443 tree->ops->writepage_end_io_hook(page, start, end);
1444 } while (bvec >= bio->bi_io_vec);
1447 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1453 * after a readpage IO is done, we need to:
1454 * clear the uptodate bits on error
1455 * set the uptodate bits if things worked
1456 * set the page up to date if all extents in the tree are uptodate
1457 * clear the lock bit in the extent tree
1458 * unlock the page if there are no other extents locked for it
1460 * Scheduling is not allowed, so the extent state tree is expected
1461 * to have one and only one object corresponding to this IO.
1463 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1464 static void end_bio_extent_readpage(struct bio *bio, int err)
1466 static int end_bio_extent_readpage(struct bio *bio,
1467 unsigned int bytes_done, int err)
1470 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1471 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1472 struct extent_map_tree *tree = bio->bi_private;
1478 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1484 struct page *page = bvec->bv_page;
1485 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1487 end = start + bvec->bv_len - 1;
1489 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1494 if (--bvec >= bio->bi_io_vec)
1495 prefetchw(&bvec->bv_page->flags);
1497 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1498 ret = tree->ops->readpage_end_io_hook(page, start, end);
1503 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1505 SetPageUptodate(page);
1507 check_page_uptodate(tree, page);
1509 ClearPageUptodate(page);
1513 unlock_extent(tree, start, end, GFP_ATOMIC);
1518 check_page_locked(tree, page);
1519 } while (bvec >= bio->bi_io_vec);
1522 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1528 * IO done from prepare_write is pretty simple: we just unlock
1529 * the structs in the extent tree when done, and set the uptodate bits
1532 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1533 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1535 static int end_bio_extent_preparewrite(struct bio *bio,
1536 unsigned int bytes_done, int err)
1539 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1540 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1541 struct extent_map_tree *tree = bio->bi_private;
1545 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1551 struct page *page = bvec->bv_page;
1552 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1554 end = start + bvec->bv_len - 1;
1556 if (--bvec >= bio->bi_io_vec)
1557 prefetchw(&bvec->bv_page->flags);
1560 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1562 ClearPageUptodate(page);
1566 unlock_extent(tree, start, end, GFP_ATOMIC);
1568 } while (bvec >= bio->bi_io_vec);
1571 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1577 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1582 bio = bio_alloc(gfp_flags, nr_vecs);
1584 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1585 while (!bio && (nr_vecs /= 2))
1586 bio = bio_alloc(gfp_flags, nr_vecs);
1590 bio->bi_bdev = bdev;
1591 bio->bi_sector = first_sector;
1596 static int submit_one_bio(int rw, struct bio *bio)
1603 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1604 if (maxsector < bio->bi_sector) {
1605 printk("sector too large max %Lu got %llu\n", maxsector,
1606 (unsigned long long)bio->bi_sector);
1610 submit_bio(rw, bio);
1611 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1617 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1618 struct page *page, sector_t sector,
1619 size_t size, unsigned long offset,
1620 struct block_device *bdev,
1621 struct bio **bio_ret,
1622 unsigned long max_pages,
1623 bio_end_io_t end_io_func)
1629 if (bio_ret && *bio_ret) {
1631 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1632 bio_add_page(bio, page, size, offset) < size) {
1633 ret = submit_one_bio(rw, bio);
1639 nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1640 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1642 printk("failed to allocate bio nr %d\n", nr);
1644 bio_add_page(bio, page, size, offset);
1645 bio->bi_end_io = end_io_func;
1646 bio->bi_private = tree;
1650 ret = submit_one_bio(rw, bio);
1656 void set_page_extent_mapped(struct page *page)
1658 if (!PagePrivate(page)) {
1659 SetPagePrivate(page);
1660 WARN_ON(!page->mapping->a_ops->invalidatepage);
1661 set_page_private(page, EXTENT_PAGE_PRIVATE);
1662 page_cache_get(page);
1667 * basic readpage implementation. Locked extent state structs are inserted
1668 * into the tree and removed when the IO is done (by the end_io
1671 static int __extent_read_full_page(struct extent_map_tree *tree,
1673 get_extent_t *get_extent,
1676 struct inode *inode = page->mapping->host;
1677 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1678 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1682 u64 last_byte = i_size_read(inode);
1686 struct extent_map *em;
1687 struct block_device *bdev;
1690 size_t page_offset = 0;
1692 size_t blocksize = inode->i_sb->s_blocksize;
1694 set_page_extent_mapped(page);
1697 lock_extent(tree, start, end, GFP_NOFS);
1699 while (cur <= end) {
1700 if (cur >= last_byte) {
1702 iosize = PAGE_CACHE_SIZE - page_offset;
1703 userpage = kmap_atomic(page, KM_USER0);
1704 memset(userpage + page_offset, 0, iosize);
1705 flush_dcache_page(page);
1706 kunmap_atomic(userpage, KM_USER0);
1707 set_extent_uptodate(tree, cur, cur + iosize - 1,
1709 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1712 em = get_extent(inode, page, page_offset, cur, end, 0);
1713 if (IS_ERR(em) || !em) {
1715 unlock_extent(tree, cur, end, GFP_NOFS);
1719 extent_offset = cur - em->start;
1720 BUG_ON(em->end < cur);
1723 iosize = min(em->end - cur, end - cur) + 1;
1724 cur_end = min(em->end, end);
1725 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1726 sector = (em->block_start + extent_offset) >> 9;
1728 block_start = em->block_start;
1729 free_extent_map(em);
1732 /* we've found a hole, just zero and go on */
1733 if (block_start == EXTENT_MAP_HOLE) {
1735 userpage = kmap_atomic(page, KM_USER0);
1736 memset(userpage + page_offset, 0, iosize);
1737 flush_dcache_page(page);
1738 kunmap_atomic(userpage, KM_USER0);
1740 set_extent_uptodate(tree, cur, cur + iosize - 1,
1742 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1744 page_offset += iosize;
1747 /* the get_extent function already copied into the page */
1748 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1749 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1751 page_offset += iosize;
1756 if (tree->ops && tree->ops->readpage_io_hook) {
1757 ret = tree->ops->readpage_io_hook(page, cur,
1761 unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1763 ret = submit_extent_page(READ, tree, page,
1764 sector, iosize, page_offset,
1766 end_bio_extent_readpage);
1771 page_offset += iosize;
1775 if (!PageError(page))
1776 SetPageUptodate(page);
1782 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1783 get_extent_t *get_extent)
1785 struct bio *bio = NULL;
1788 ret = __extent_read_full_page(tree, page, get_extent, &bio);
1790 submit_one_bio(READ, bio);
1793 EXPORT_SYMBOL(extent_read_full_page);
1796 * the writepage semantics are similar to regular writepage. extent
1797 * records are inserted to lock ranges in the tree, and as dirty areas
1798 * are found, they are marked writeback. Then the lock bits are removed
1799 * and the end_io handler clears the writeback ranges
1801 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1804 struct inode *inode = page->mapping->host;
1805 struct extent_page_data *epd = data;
1806 struct extent_map_tree *tree = epd->tree;
1807 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1809 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1813 u64 last_byte = i_size_read(inode);
1817 struct extent_map *em;
1818 struct block_device *bdev;
1821 size_t page_offset = 0;
1823 loff_t i_size = i_size_read(inode);
1824 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1828 WARN_ON(!PageLocked(page));
1829 if (page->index > end_index) {
1830 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1835 if (page->index == end_index) {
1838 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1840 userpage = kmap_atomic(page, KM_USER0);
1841 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1842 flush_dcache_page(page);
1843 kunmap_atomic(userpage, KM_USER0);
1846 set_page_extent_mapped(page);
1848 delalloc_start = start;
1850 while (delalloc_end < page_end) {
1851 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1854 if (nr_delalloc == 0) {
1855 delalloc_start = delalloc_end + 1;
1858 tree->ops->fill_delalloc(inode, delalloc_start,
1860 clear_extent_bit(tree, delalloc_start,
1862 EXTENT_LOCKED | EXTENT_DELALLOC,
1864 delalloc_start = delalloc_end + 1;
1866 lock_extent(tree, start, page_end, GFP_NOFS);
1869 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1870 printk("found delalloc bits after lock_extent\n");
1873 if (last_byte <= start) {
1874 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1878 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1879 blocksize = inode->i_sb->s_blocksize;
1881 while (cur <= end) {
1882 if (cur >= last_byte) {
1883 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1886 em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1887 if (IS_ERR(em) || !em) {
1892 extent_offset = cur - em->start;
1893 BUG_ON(em->end < cur);
1895 iosize = min(em->end - cur, end - cur) + 1;
1896 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1897 sector = (em->block_start + extent_offset) >> 9;
1899 block_start = em->block_start;
1900 free_extent_map(em);
1903 if (block_start == EXTENT_MAP_HOLE ||
1904 block_start == EXTENT_MAP_INLINE) {
1905 clear_extent_dirty(tree, cur,
1906 cur + iosize - 1, GFP_NOFS);
1908 page_offset += iosize;
1912 /* leave this out until we have a page_mkwrite call */
1913 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1916 page_offset += iosize;
1919 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1920 if (tree->ops && tree->ops->writepage_io_hook) {
1921 ret = tree->ops->writepage_io_hook(page, cur,
1929 unsigned long max_nr = end_index + 1;
1930 set_range_writeback(tree, cur, cur + iosize - 1);
1931 if (!PageWriteback(page)) {
1932 printk("warning page %lu not writeback, "
1933 "cur %llu end %llu\n", page->index,
1934 (unsigned long long)cur,
1935 (unsigned long long)end);
1938 ret = submit_extent_page(WRITE, tree, page, sector,
1939 iosize, page_offset, bdev,
1941 end_bio_extent_writepage);
1946 page_offset += iosize;
1951 /* make sure the mapping tag for page dirty gets cleared */
1952 set_page_writeback(page);
1953 end_page_writeback(page);
1955 unlock_extent(tree, start, page_end, GFP_NOFS);
1960 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1962 /* Taken directly from 2.6.23 for 2.6.18 back port */
1963 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
1967 * write_cache_pages - walk the list of dirty pages of the given address space
1968 * and write all of them.
1969 * @mapping: address space structure to write
1970 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1971 * @writepage: function called for each page
1972 * @data: data passed to writepage function
1974 * If a page is already under I/O, write_cache_pages() skips it, even
1975 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
1976 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
1977 * and msync() need to guarantee that all the data which was dirty at the time
1978 * the call was made get new I/O started against them. If wbc->sync_mode is
1979 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1980 * existing IO to complete.
1982 static int write_cache_pages(struct address_space *mapping,
1983 struct writeback_control *wbc, writepage_t writepage,
1986 struct backing_dev_info *bdi = mapping->backing_dev_info;
1989 struct pagevec pvec;
1992 pgoff_t end; /* Inclusive */
1994 int range_whole = 0;
1996 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1997 wbc->encountered_congestion = 1;
2001 pagevec_init(&pvec, 0);
2002 if (wbc->range_cyclic) {
2003 index = mapping->writeback_index; /* Start from prev offset */
2006 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2007 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2008 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2013 while (!done && (index <= end) &&
2014 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2015 PAGECACHE_TAG_DIRTY,
2016 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2020 for (i = 0; i < nr_pages; i++) {
2021 struct page *page = pvec.pages[i];
2024 * At this point we hold neither mapping->tree_lock nor
2025 * lock on the page itself: the page may be truncated or
2026 * invalidated (changing page->mapping to NULL), or even
2027 * swizzled back from swapper_space to tmpfs file
2032 if (unlikely(page->mapping != mapping)) {
2037 if (!wbc->range_cyclic && page->index > end) {
2043 if (wbc->sync_mode != WB_SYNC_NONE)
2044 wait_on_page_writeback(page);
2046 if (PageWriteback(page) ||
2047 !clear_page_dirty_for_io(page)) {
2052 ret = (*writepage)(page, wbc, data);
2054 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2058 if (ret || (--(wbc->nr_to_write) <= 0))
2060 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2061 wbc->encountered_congestion = 1;
2065 pagevec_release(&pvec);
2068 if (!scanned && !done) {
2070 * We hit the last page and there is more work to be done: wrap
2071 * back to the start of the file
2077 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2078 mapping->writeback_index = index;
2083 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
2084 get_extent_t *get_extent,
2085 struct writeback_control *wbc)
2088 struct address_space *mapping = page->mapping;
2089 struct extent_page_data epd = {
2092 .get_extent = get_extent,
2094 struct writeback_control wbc_writepages = {
2096 .sync_mode = WB_SYNC_NONE,
2097 .older_than_this = NULL,
2099 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2100 .range_end = (loff_t)-1,
2104 ret = __extent_writepage(page, wbc, &epd);
2106 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2108 submit_one_bio(WRITE, epd.bio);
2112 EXPORT_SYMBOL(extent_write_full_page);
2115 int extent_writepages(struct extent_map_tree *tree,
2116 struct address_space *mapping,
2117 get_extent_t *get_extent,
2118 struct writeback_control *wbc)
2121 struct extent_page_data epd = {
2124 .get_extent = get_extent,
2127 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2129 submit_one_bio(WRITE, epd.bio);
2133 EXPORT_SYMBOL(extent_writepages);
2135 int extent_readpages(struct extent_map_tree *tree,
2136 struct address_space *mapping,
2137 struct list_head *pages, unsigned nr_pages,
2138 get_extent_t get_extent)
2140 struct bio *bio = NULL;
2142 struct pagevec pvec;
2144 pagevec_init(&pvec, 0);
2145 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2146 struct page *page = list_entry(pages->prev, struct page, lru);
2148 prefetchw(&page->flags);
2149 list_del(&page->lru);
2151 * what we want to do here is call add_to_page_cache_lru,
2152 * but that isn't exported, so we reproduce it here
2154 if (!add_to_page_cache(page, mapping,
2155 page->index, GFP_KERNEL)) {
2157 /* open coding of lru_cache_add, also not exported */
2158 page_cache_get(page);
2159 if (!pagevec_add(&pvec, page))
2160 __pagevec_lru_add(&pvec);
2161 __extent_read_full_page(tree, page, get_extent, &bio);
2163 page_cache_release(page);
2165 if (pagevec_count(&pvec))
2166 __pagevec_lru_add(&pvec);
2167 BUG_ON(!list_empty(pages));
2169 submit_one_bio(READ, bio);
2172 EXPORT_SYMBOL(extent_readpages);
2175 * basic invalidatepage code; this waits on any locked or writeback
2176 * ranges corresponding to the page, and then deletes any extent state
2177 * records from the tree
2179 int extent_invalidatepage(struct extent_map_tree *tree,
2180 struct page *page, unsigned long offset)
2182 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2183 u64 end = start + PAGE_CACHE_SIZE - 1;
2184 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2186 start += (offset + blocksize - 1) & ~(blocksize - 1);
2190 lock_extent(tree, start, end, GFP_NOFS);
2191 wait_on_extent_writeback(tree, start, end);
2192 clear_extent_bit(tree, start, end,
2193 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2197 EXPORT_SYMBOL(extent_invalidatepage);
2200 * simple commit_write call; the page is marked dirty and i_size is
2201 * updated if the write extended the file
2203 int extent_commit_write(struct extent_map_tree *tree,
2204 struct inode *inode, struct page *page,
2205 unsigned from, unsigned to)
2207 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2209 set_page_extent_mapped(page);
2210 set_page_dirty(page);
2212 if (pos > inode->i_size) {
2213 i_size_write(inode, pos);
2214 mark_inode_dirty(inode);
2218 EXPORT_SYMBOL(extent_commit_write);
2220 int extent_prepare_write(struct extent_map_tree *tree,
2221 struct inode *inode, struct page *page,
2222 unsigned from, unsigned to, get_extent_t *get_extent)
2224 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2225 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2227 u64 orig_block_start;
2230 struct extent_map *em;
2231 unsigned blocksize = 1 << inode->i_blkbits;
2232 size_t page_offset = 0;
2233 size_t block_off_start;
2234 size_t block_off_end;
2240 set_page_extent_mapped(page);
2242 block_start = (page_start + from) & ~((u64)blocksize - 1);
2243 block_end = (page_start + to - 1) | (blocksize - 1);
2244 orig_block_start = block_start;
2246 lock_extent(tree, page_start, page_end, GFP_NOFS);
2247 while (block_start <= block_end) {
2248 em = get_extent(inode, page, page_offset, block_start,
2250 if (IS_ERR(em) || !em) {
2253 cur_end = min(block_end, em->end);
2254 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2255 block_off_end = block_off_start + blocksize;
2256 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2258 if (!PageUptodate(page) && isnew &&
2259 (block_off_end > to || block_off_start < from)) {
2262 kaddr = kmap_atomic(page, KM_USER0);
2263 if (block_off_end > to)
2264 memset(kaddr + to, 0, block_off_end - to);
2265 if (block_off_start < from)
2266 memset(kaddr + block_off_start, 0,
2267 from - block_off_start);
2268 flush_dcache_page(page);
2269 kunmap_atomic(kaddr, KM_USER0);
2271 if ((em->block_start != EXTENT_MAP_HOLE &&
2272 em->block_start != EXTENT_MAP_INLINE) &&
2273 !isnew && !PageUptodate(page) &&
2274 (block_off_end > to || block_off_start < from) &&
2275 !test_range_bit(tree, block_start, cur_end,
2276 EXTENT_UPTODATE, 1)) {
2278 u64 extent_offset = block_start - em->start;
2280 sector = (em->block_start + extent_offset) >> 9;
2281 iosize = (cur_end - block_start + blocksize) &
2282 ~((u64)blocksize - 1);
2284 * we've already got the extent locked, but we
2285 * need to split the state such that our end_bio
2286 * handler can clear the lock.
2288 set_extent_bit(tree, block_start,
2289 block_start + iosize - 1,
2290 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2291 ret = submit_extent_page(READ, tree, page,
2292 sector, iosize, page_offset, em->bdev,
2294 end_bio_extent_preparewrite);
2296 block_start = block_start + iosize;
2298 set_extent_uptodate(tree, block_start, cur_end,
2300 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2301 block_start = cur_end + 1;
2303 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2304 free_extent_map(em);
2307 wait_extent_bit(tree, orig_block_start,
2308 block_end, EXTENT_LOCKED);
2310 check_page_uptodate(tree, page);
2312 /* FIXME, zero out newly allocated blocks on error */
2315 EXPORT_SYMBOL(extent_prepare_write);
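/*
 * Illustrative sketch (hypothetical helper): the prepare/commit pair
 * brackets a buffered write to one page.  Copying the new data in between
 * is elided; get_extent is whatever callback the filesystem normally uses.
 */
static inline int example_write_page_range(struct extent_map_tree *tree,
					   struct inode *inode,
					   struct page *page,
					   unsigned from, unsigned to,
					   get_extent_t *get_extent)
{
	int ret;

	ret = extent_prepare_write(tree, inode, page, from, to, get_extent);
	if (ret)
		return ret;
	/* ... copy the caller's data into the page between from and to ... */
	return extent_commit_write(tree, inode, page, from, to);
}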
2318 * a helper for releasepage. As long as there are no locked extents
2319 * in the range corresponding to the page, both state records and extent
2320 * map records are removed
2322 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2324 struct extent_map *em;
2325 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2326 u64 end = start + PAGE_CACHE_SIZE - 1;
2327 u64 orig_start = start;
2330 while (start <= end) {
2331 em = lookup_extent_mapping(tree, start, end);
2332 if (!em || IS_ERR(em))
2334 if (!test_range_bit(tree, em->start, em->end,
2335 EXTENT_LOCKED, 0)) {
2336 remove_extent_mapping(tree, em);
2337 /* once for the rb tree */
2338 free_extent_map(em);
2340 start = em->end + 1;
2342 free_extent_map(em);
2344 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2347 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2351 EXPORT_SYMBOL(try_release_extent_mapping);
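/*
 * Illustrative sketch (hypothetical helper, assuming the usual releasepage
 * convention of returning non-zero when the page can be released): a
 * filesystem ->releasepage() typically just forwards to the helper above
 * once it has found the extent_map_tree that covers the page.
 */
static inline int example_releasepage(struct extent_map_tree *tree,
				      struct page *page)
{
	return try_release_extent_mapping(tree, page);
}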
2353 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2354 get_extent_t *get_extent)
2356 struct inode *inode = mapping->host;
2357 u64 start = iblock << inode->i_blkbits;
2358 u64 end = start + (1 << inode->i_blkbits) - 1;
2359 sector_t sector = 0;
2360 struct extent_map *em;
2362 em = get_extent(inode, NULL, 0, start, end, 0);
2363 if (!em || IS_ERR(em))
2366 if (em->block_start == EXTENT_MAP_INLINE ||
2367 em->block_start == EXTENT_MAP_HOLE)
2370 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2372 free_extent_map(em);
2376 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2378 if (list_empty(&eb->lru)) {
2379 extent_buffer_get(eb);
2380 list_add(&eb->lru, &tree->buffer_lru);
2382 if (tree->lru_size >= BUFFER_LRU_MAX) {
2383 struct extent_buffer *rm;
2384 rm = list_entry(tree->buffer_lru.prev,
2385 struct extent_buffer, lru);
2387 list_del_init(&rm->lru);
2388 free_extent_buffer(rm);
2391 list_move(&eb->lru, &tree->buffer_lru);
2394 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2395 u64 start, unsigned long len)
2397 struct list_head *lru = &tree->buffer_lru;
2398 struct list_head *cur = lru->next;
2399 struct extent_buffer *eb;
2401 if (list_empty(lru))
2405 eb = list_entry(cur, struct extent_buffer, lru);
2406 if (eb->start == start && eb->len == len) {
2407 extent_buffer_get(eb);
2411 } while (cur != lru);
2415 static inline unsigned long num_extent_pages(u64 start, u64 len)
2417 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2418 (start >> PAGE_CACHE_SHIFT);
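/*
 * Worked example (illustrative, assuming 4K pages): a 4096 byte buffer that
 * starts at offset 0x1800 touches two pages, since
 *
 *	num_extent_pages(0x1800, 0x1000)
 *		= ((0x1800 + 0x1000 + 0xfff) >> 12) - (0x1800 >> 12)
 *		= 3 - 1
 *		= 2
 */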
2421 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2425 struct address_space *mapping;
2428 return eb->first_page;
2429 i += eb->start >> PAGE_CACHE_SHIFT;
2430 mapping = eb->first_page->mapping;
2431 read_lock_irq(&mapping->tree_lock);
2432 p = radix_tree_lookup(&mapping->page_tree, i);
2433 read_unlock_irq(&mapping->tree_lock);
2437 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2442 struct extent_buffer *eb = NULL;
2444 spin_lock(&tree->lru_lock);
2445 eb = find_lru(tree, start, len);
2446 spin_unlock(&tree->lru_lock);
2451 eb = kmem_cache_zalloc(extent_buffer_cache, mask);
2452 INIT_LIST_HEAD(&eb->lru);
2455 atomic_set(&eb->refs, 1);
2460 static void __free_extent_buffer(struct extent_buffer *eb)
2462 kmem_cache_free(extent_buffer_cache, eb);
2465 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2466 u64 start, unsigned long len,
2470 unsigned long num_pages = num_extent_pages(start, len);
2472 unsigned long index = start >> PAGE_CACHE_SHIFT;
2473 struct extent_buffer *eb;
2475 struct address_space *mapping = tree->mapping;
2478 eb = __alloc_extent_buffer(tree, start, len, mask);
2479 if (!eb || IS_ERR(eb))
2482 if (eb->flags & EXTENT_BUFFER_FILLED)
2486 eb->first_page = page0;
2489 page_cache_get(page0);
2490 mark_page_accessed(page0);
2491 set_page_extent_mapped(page0);
2492 WARN_ON(!PageUptodate(page0));
2493 set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2498 for (; i < num_pages; i++, index++) {
2499 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2504 set_page_extent_mapped(p);
2505 mark_page_accessed(p);
2508 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2511 set_page_private(p, EXTENT_PAGE_PRIVATE);
2513 if (!PageUptodate(p))
2518 eb->flags |= EXTENT_UPTODATE;
2519 eb->flags |= EXTENT_BUFFER_FILLED;
2522 spin_lock(&tree->lru_lock);
2524 spin_unlock(&tree->lru_lock);
2528 spin_lock(&tree->lru_lock);
2529 list_del_init(&eb->lru);
2530 spin_unlock(&tree->lru_lock);
2531 if (!atomic_dec_and_test(&eb->refs))
2533 for (index = 1; index < i; index++) {
2534 page_cache_release(extent_buffer_page(eb, index));
2537 page_cache_release(extent_buffer_page(eb, 0));
2538 __free_extent_buffer(eb);
2541 EXPORT_SYMBOL(alloc_extent_buffer);
2543 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2544 u64 start, unsigned long len,
2547 unsigned long num_pages = num_extent_pages(start, len);
2549 unsigned long index = start >> PAGE_CACHE_SHIFT;
2550 struct extent_buffer *eb;
2552 struct address_space *mapping = tree->mapping;
2555 eb = __alloc_extent_buffer(tree, start, len, mask);
2556 if (!eb || IS_ERR(eb))
2559 if (eb->flags & EXTENT_BUFFER_FILLED)
2562 for (i = 0; i < num_pages; i++, index++) {
2563 p = find_lock_page(mapping, index);
2567 set_page_extent_mapped(p);
2568 mark_page_accessed(p);
2572 set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2575 set_page_private(p, EXTENT_PAGE_PRIVATE);
2578 if (!PageUptodate(p))
2583 eb->flags |= EXTENT_UPTODATE;
2584 eb->flags |= EXTENT_BUFFER_FILLED;
2587 spin_lock(&tree->lru_lock);
2589 spin_unlock(&tree->lru_lock);
2592 spin_lock(&tree->lru_lock);
2593 list_del_init(&eb->lru);
2594 spin_unlock(&tree->lru_lock);
2595 if (!atomic_dec_and_test(&eb->refs))
2597 for (index = 1; index < i; index++) {
2598 page_cache_release(extent_buffer_page(eb, index));
2601 page_cache_release(extent_buffer_page(eb, 0));
2602 __free_extent_buffer(eb);
2605 EXPORT_SYMBOL(find_extent_buffer);
2607 void free_extent_buffer(struct extent_buffer *eb)
2610 unsigned long num_pages;
2615 if (!atomic_dec_and_test(&eb->refs))
2618 WARN_ON(!list_empty(&eb->lru));
2619 num_pages = num_extent_pages(eb->start, eb->len);
2621 for (i = 1; i < num_pages; i++) {
2622 page_cache_release(extent_buffer_page(eb, i));
2624 page_cache_release(extent_buffer_page(eb, 0));
2625 __free_extent_buffer(eb);
2627 EXPORT_SYMBOL(free_extent_buffer);
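/*
 * clear_extent_buffer_dirty clears EXTENT_DIRTY for the buffer's range,
 * then clears the dirty flag and radix tree dirty tag on each backing
 * page.  First/last pages that the buffer only partially covers are left
 * alone while any other part of the page is still dirty in the tree.
 */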
2629 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2630 struct extent_buffer *eb)
2634 unsigned long num_pages;
2637 u64 start = eb->start;
2638 u64 end = start + eb->len - 1;
2640 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2641 num_pages = num_extent_pages(eb->start, eb->len);
2643 for (i = 0; i < num_pages; i++) {
2644 page = extent_buffer_page(eb, i);
2647 * if we're on the last page or the first page and the
2648 * block isn't aligned on a page boundary, do extra checks
2649 * to make sure we don't clean a page that is partially dirty
2651 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2652 ((i == num_pages - 1) &&
2653 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2654 start = (u64)page->index << PAGE_CACHE_SHIFT;
2655 end = start + PAGE_CACHE_SIZE - 1;
2656 if (test_range_bit(tree, start, end,
2662 clear_page_dirty_for_io(page);
2663 write_lock_irq(&page->mapping->tree_lock);
2664 if (!PageDirty(page)) {
2665 radix_tree_tag_clear(&page->mapping->page_tree,
2667 PAGECACHE_TAG_DIRTY);
2669 write_unlock_irq(&page->mapping->tree_lock);
2674 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2676 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2677 struct extent_buffer *eb)
2679 return wait_on_extent_writeback(tree, eb->start,
2680 eb->start + eb->len - 1);
2682 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
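/*
 * set_extent_buffer_dirty makes sure the first page's private tag is in
 * place again (releasepage may have dropped it while the page was clean),
 * dirties every backing page with __set_page_dirty_nobuffers() and marks
 * the whole range EXTENT_DIRTY in the tree.
 */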
2684 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2685 struct extent_buffer *eb)
2688 unsigned long num_pages;
2690 num_pages = num_extent_pages(eb->start, eb->len);
2691 for (i = 0; i < num_pages; i++) {
2692 struct page *page = extent_buffer_page(eb, i);
2693 /* writepage may need to do something special for the
2694 * first page, so we have to make sure page->private is
2695 * properly set. releasepage may drop page->private
2696 * on us if the page isn't already dirty.
2700 set_page_private(page,
2701 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
2704 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2708 return set_extent_dirty(tree, eb->start,
2709 eb->start + eb->len - 1, GFP_NOFS);
2711 EXPORT_SYMBOL(set_extent_buffer_dirty);
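/*
 * set_extent_buffer_uptodate marks the range EXTENT_UPTODATE and sets
 * PageUptodate on each backing page; pages the buffer only partially
 * covers go through check_page_uptodate() instead, since the rest of the
 * page may not be valid yet.
 */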
2713 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2714 struct extent_buffer *eb)
2718 unsigned long num_pages;
2720 num_pages = num_extent_pages(eb->start, eb->len);
2722 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2724 for (i = 0; i < num_pages; i++) {
2725 page = extent_buffer_page(eb, i);
2726 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2727 ((i == num_pages - 1) &&
2728 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2729 check_page_uptodate(tree, page);
2732 SetPageUptodate(page);
2736 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2738 int extent_buffer_uptodate(struct extent_map_tree *tree,
2739 struct extent_buffer *eb)
2741 if (eb->flags & EXTENT_UPTODATE)
2743 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2744 EXTENT_UPTODATE, 1);
2746 EXPORT_SYMBOL(extent_buffer_uptodate);
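/*
 * read_extent_buffer_pages issues reads for any backing pages that are
 * not yet uptodate, starting with the page that contains 'start', then
 * waits for the I/O and sets EXTENT_UPTODATE on the buffer once every
 * page has come back good.
 */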
2748 int read_extent_buffer_pages(struct extent_map_tree *tree,
2749 struct extent_buffer *eb,
2754 unsigned long start_i;
2758 unsigned long num_pages;
2760 if (eb->flags & EXTENT_UPTODATE)
/* note: this cached range-bit check is compiled out by the "0 &&" */
2763 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2764 EXTENT_UPTODATE, 1)) {
2769 WARN_ON(start < eb->start);
2770 start_i = (start >> PAGE_CACHE_SHIFT) -
2771 (eb->start >> PAGE_CACHE_SHIFT);
2776 num_pages = num_extent_pages(eb->start, eb->len);
2777 for (i = start_i; i < num_pages; i++) {
2778 page = extent_buffer_page(eb, i);
2779 if (PageUptodate(page)) {
2783 if (TestSetPageLocked(page)) {
2789 if (!PageUptodate(page)) {
2790 err = page->mapping->a_ops->readpage(NULL, page);
2803 for (i = start_i; i < num_pages; i++) {
2804 page = extent_buffer_page(eb, i);
2805 wait_on_page_locked(page);
2806 if (!PageUptodate(page)) {
2811 eb->flags |= EXTENT_UPTODATE;
2814 EXPORT_SYMBOL(read_extent_buffer_pages);
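/*
 * read_extent_buffer copies len bytes at logical offset 'start' within
 * the buffer into dstv, one kmap_atomic'd page at a time.  start_offset
 * (how far eb->start sits into its first page) drives the page/offset
 * math: with 4K pages, eb->start = 0x11800 and start = 0x900,
 * start_offset is 0x800, so the copy begins in page
 * (0x800 + 0x900) >> 12 = 1 at in-page offset 0x100.  KM_USER1 is used
 * here so the copy can run while a KM_USER0 mapping from
 * map_extent_buffer() is still held.
 */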
2816 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2817 unsigned long start,
2824 char *dst = (char *)dstv;
2825 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2826 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2827 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2829 WARN_ON(start > eb->len);
2830 WARN_ON(start + len > eb->start + eb->len);
2832 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2835 page = extent_buffer_page(eb, i);
2836 if (!PageUptodate(page)) {
2837 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2840 WARN_ON(!PageUptodate(page));
2842 cur = min(len, (PAGE_CACHE_SIZE - offset));
2843 kaddr = kmap_atomic(page, KM_USER1);
2844 memcpy(dst, kaddr + offset, cur);
2845 kunmap_atomic(kaddr, KM_USER1);
2853 EXPORT_SYMBOL(read_extent_buffer);
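/*
 * map_private_extent_buffer kmaps the single page holding
 * [start, start + min_len) and hands back the mapped address in *map,
 * with *map_start/*map_len describing how much of the buffer that page
 * covers.  A range that straddles a page boundary cannot be mapped.
 */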
2855 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2856 unsigned long min_len, char **token, char **map,
2857 unsigned long *map_start,
2858 unsigned long *map_len, int km)
2860 size_t offset = start & (PAGE_CACHE_SIZE - 1);
2863 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2864 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2865 unsigned long end_i = (start_offset + start + min_len - 1) >>
2872 offset = start_offset;
2876 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2878 if (start + min_len > eb->len) {
2879 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2883 p = extent_buffer_page(eb, i);
2884 WARN_ON(!PageUptodate(p));
2885 kaddr = kmap_atomic(p, km);
2887 *map = kaddr + offset;
2888 *map_len = PAGE_CACHE_SIZE - offset;
2891 EXPORT_SYMBOL(map_private_extent_buffer);
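/*
 * map_extent_buffer is the caching wrapper: any mapping already recorded
 * in eb->map_token is dropped first, and the new token, start and length
 * are stashed on the buffer for later callers.
 */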
2893 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2894 unsigned long min_len,
2895 char **token, char **map,
2896 unsigned long *map_start,
2897 unsigned long *map_len, int km)
2901 if (eb->map_token) {
2902 unmap_extent_buffer(eb, eb->map_token, km);
2903 eb->map_token = NULL;
2906 err = map_private_extent_buffer(eb, start, min_len, token, map,
2907 map_start, map_len, km);
2909 eb->map_token = *token;
2911 eb->map_start = *map_start;
2912 eb->map_len = *map_len;
2916 EXPORT_SYMBOL(map_extent_buffer);
2918 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2920 kunmap_atomic(token, km);
2922 EXPORT_SYMBOL(unmap_extent_buffer);
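/*
 * memcmp_extent_buffer compares len bytes of the buffer at logical offset
 * 'start' against ptrv, page by page, and returns the first non-zero
 * memcmp() result (0 if the ranges match).
 */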
2924 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2925 unsigned long start,
2932 char *ptr = (char *)ptrv;
2933 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2934 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2937 WARN_ON(start > eb->len);
2938 WARN_ON(start + len > eb->start + eb->len);
2940 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2943 page = extent_buffer_page(eb, i);
2944 WARN_ON(!PageUptodate(page));
2946 cur = min(len, (PAGE_CACHE_SIZE - offset));
2948 kaddr = kmap_atomic(page, KM_USER0);
2949 ret = memcmp(ptr, kaddr + offset, cur);
2950 kunmap_atomic(kaddr, KM_USER0);
2961 EXPORT_SYMBOL(memcmp_extent_buffer);
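/*
 * write_extent_buffer is the mirror of read_extent_buffer: it copies len
 * bytes from srcv into the buffer at logical offset 'start', and expects
 * every page it touches to already be uptodate.
 */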
2963 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2964 unsigned long start, unsigned long len)
2970 char *src = (char *)srcv;
2971 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2972 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2974 WARN_ON(start > eb->len);
2975 WARN_ON(start + len > eb->start + eb->len);
2977 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2980 page = extent_buffer_page(eb, i);
2981 WARN_ON(!PageUptodate(page));
2983 cur = min(len, PAGE_CACHE_SIZE - offset);
2984 kaddr = kmap_atomic(page, KM_USER1);
2985 memcpy(kaddr + offset, src, cur);
2986 kunmap_atomic(kaddr, KM_USER1);
2994 EXPORT_SYMBOL(write_extent_buffer);
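/*
 * memset_extent_buffer fills len bytes of the buffer at logical offset
 * 'start' with the byte c, using the same page walk as
 * write_extent_buffer.
 */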
2996 void memset_extent_buffer(struct extent_buffer *eb, char c,
2997 unsigned long start, unsigned long len)
3003 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3004 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3006 WARN_ON(start > eb->len);
3007 WARN_ON(start + len > eb->start + eb->len);
3009 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3012 page = extent_buffer_page(eb, i);
3013 WARN_ON(!PageUptodate(page));
3015 cur = min(len, PAGE_CACHE_SIZE - offset);
3016 kaddr = kmap_atomic(page, KM_USER0);
3017 memset(kaddr + offset, c, cur);
3018 kunmap_atomic(kaddr, KM_USER0);
3025 EXPORT_SYMBOL(memset_extent_buffer);
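/*
 * copy_extent_buffer copies len bytes from src_offset in 'src' into
 * dst_offset in 'dst': the destination pages are mapped directly and
 * read_extent_buffer() is reused to fetch the source bytes.
 */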
3027 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3028 unsigned long dst_offset, unsigned long src_offset,
3031 u64 dst_len = dst->len;
3036 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3037 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3039 WARN_ON(src->len != dst_len);
3041 offset = (start_offset + dst_offset) &
3042 ((unsigned long)PAGE_CACHE_SIZE - 1);
3045 page = extent_buffer_page(dst, i);
3046 WARN_ON(!PageUptodate(page));
3048 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3050 kaddr = kmap_atomic(page, KM_USER0);
3051 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3052 kunmap_atomic(kaddr, KM_USER0);
3060 EXPORT_SYMBOL(copy_extent_buffer);
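/*
 * move_pages and copy_pages are the per-page helpers behind
 * memmove_extent_buffer and memcpy_extent_buffer.  move_pages tolerates
 * overlap: within one page it falls back to memmove(), across two pages
 * it copies backwards from the end.  copy_pages is a plain memcpy
 * between two (possibly identical) pages.
 */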
3062 static void move_pages(struct page *dst_page, struct page *src_page,
3063 unsigned long dst_off, unsigned long src_off,
3066 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3067 if (dst_page == src_page) {
3068 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3070 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3071 char *p = dst_kaddr + dst_off + len;
3072 char *s = src_kaddr + src_off + len;
3077 kunmap_atomic(src_kaddr, KM_USER1);
3079 kunmap_atomic(dst_kaddr, KM_USER0);
3082 static void copy_pages(struct page *dst_page, struct page *src_page,
3083 unsigned long dst_off, unsigned long src_off,
3086 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3089 if (dst_page != src_page)
3090 src_kaddr = kmap_atomic(src_page, KM_USER1);
3092 src_kaddr = dst_kaddr;
3094 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3095 kunmap_atomic(dst_kaddr, KM_USER0);
3096 if (dst_page != src_page)
3097 kunmap_atomic(src_kaddr, KM_USER1);
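/*
 * memcpy_extent_buffer copies len bytes within one extent_buffer, from
 * src_offset to dst_offset, walking forwards and limiting each step to
 * what fits in both the current source and destination pages.
 */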
3100 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3101 unsigned long src_offset, unsigned long len)
3104 size_t dst_off_in_page;
3105 size_t src_off_in_page;
3106 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3107 unsigned long dst_i;
3108 unsigned long src_i;
3110 if (src_offset + len > dst->len) {
3111 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3112 src_offset, len, dst->len);
3115 if (dst_offset + len > dst->len) {
3116 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3117 dst_offset, len, dst->len);
3122 dst_off_in_page = (start_offset + dst_offset) &
3123 ((unsigned long)PAGE_CACHE_SIZE - 1);
3124 src_off_in_page = (start_offset + src_offset) &
3125 ((unsigned long)PAGE_CACHE_SIZE - 1);
3127 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3128 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3130 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3132 cur = min_t(unsigned long, cur,
3133 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3135 copy_pages(extent_buffer_page(dst, dst_i),
3136 extent_buffer_page(dst, src_i),
3137 dst_off_in_page, src_off_in_page, cur);
3144 EXPORT_SYMBOL(memcpy_extent_buffer);
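/*
 * memmove_extent_buffer is the overlap-safe variant: when the destination
 * sits below the source it just calls memcpy_extent_buffer, otherwise it
 * walks the range backwards from the end with move_pages.
 */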
3146 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3147 unsigned long src_offset, unsigned long len)
3150 size_t dst_off_in_page;
3151 size_t src_off_in_page;
3152 unsigned long dst_end = dst_offset + len - 1;
3153 unsigned long src_end = src_offset + len - 1;
3154 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3155 unsigned long dst_i;
3156 unsigned long src_i;
3158 if (src_offset + len > dst->len) {
3159 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3160 src_offset, len, dst->len);
3163 if (dst_offset + len > dst->len) {
3164 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3165 dst_offset, len, dst->len);
3168 if (dst_offset < src_offset) {
3169 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3173 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3174 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3176 dst_off_in_page = (start_offset + dst_end) &
3177 ((unsigned long)PAGE_CACHE_SIZE - 1);
3178 src_off_in_page = (start_offset + src_end) &
3179 ((unsigned long)PAGE_CACHE_SIZE - 1);
3181 cur = min_t(unsigned long, len, src_off_in_page + 1);
3182 cur = min(cur, dst_off_in_page + 1);
3183 move_pages(extent_buffer_page(dst, dst_i),
3184 extent_buffer_page(dst, src_i),
3185 dst_off_in_page - cur + 1,
3186 src_off_in_page - cur + 1, cur);
3193 EXPORT_SYMBOL(memmove_extent_buffer);