/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);
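		/*
		 * pre-faulting the user buffer makes it unlikely that
		 * __copy_from_user() below will fault while we hold the
		 * locked destination pages
		 */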
		/* Copy data from userspace to the current page */
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty.  Clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct file *file,
				struct page **pages,
				size_t num_pages,
				loff_t pos,
				size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 start_pos;
	u64 num_bytes;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
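	/*
	 * for example, with a 4K sectorsize, pos == 5000 and
	 * write_bytes == 4000: start_pos becomes 4096 and num_bytes
	 * becomes 8192, the two full sectors that cover the write
	 */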
	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);

	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* check for reserved extents on each page, we don't want
	 * to reset the delalloc bit on things that already have
	 * extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	unsigned long flags;
	int compressed = 0;
	int testend = 1;
	int ret;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
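		/*
		 * pinned extents are still waiting on their ordered IO to
		 * finish; when skip_pinned is set we leave them in the cache
		 * and only trim the range being dropped around them
		 */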
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);
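		/*
		 * the mapping is out of the tree now; if it sticks out on
		 * either side of [start, end] the surviving front and/or
		 * tail pieces are re-inserted below from the preallocated
		 * split maps
		 */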
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	u64 last_offset = 0;
	u64 extent_end = 0;
	int nritems;
	int slot;
	int found_type;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while (1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;

		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk(KERN_ERR "inode %lu found offset %llu "
			       "expected %llu\n", inode->i_ino,
			       (unsigned long long)found_key.offset,
			       (unsigned long long)last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, extent);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk(KERN_ERR "inode %lu found offset %llu size %llu\n",
		       inode->i_ino, (unsigned long long)last_offset,
		       (unsigned long long)inode->i_size);
		err = 1;
	}
out:
	btrfs_free_path(path);
	return err;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 locked_end = end;
	u64 search_start = start;
	u16 other_encoding = 0;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	while (1) {
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		search_start = max(key.offset, start);

		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
			}
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		read_extent_buffer(leaf, &old, (unsigned long)extent,
				   sizeof(old));
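		/*
		 * keep a stack copy of the extent item; the path is released
		 * below, but the bookend insertion still needs the original
		 * disk_bytenr and sizes
		 */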
		root_gen = btrfs_header_generation(leaf);
		root_owner = btrfs_header_owner(leaf);
		leaf_start = leaf->start;

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			orig_parent = path->nodes[0]->start;
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					    disk_bytenr,
					    le64_to_cpu(old.disk_num_bytes),
					    orig_parent, root->root_key.objectid,
					    trans->transid, inode->i_ino);
			}
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else {
			search_start = extent_end;
		}
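		/*
		 * at this point the extent overlaps the drop range in one of
		 * three ways: it sticks out in front of start (truncate it
		 * below), it sticks out past end (bookend split further
		 * down), or it sits entirely inside and is deleted
		 */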
		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;

			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
								new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			inode_sub_bytes(inode, extent_end -
					key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			path->leave_spinning = 1;
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));
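			/*
			 * the new item starts as a byte copy of the original
			 * extent item; only its file offset and num_bytes are
			 * adjusted below so it describes the tail that
			 * survives past 'end'
			 */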
			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							 encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							     other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but it's the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_unlock_up_safe(path, 1);
			btrfs_mark_buffer_dirty(path->nodes[0]);
			btrfs_set_lock_blocking(path->nodes[0]);

			if (disk_bytenr != 0) {
				ret = btrfs_update_extent_ref(trans, root,
						disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						orig_parent,
						leaf->start,
						root->root_key.objectid,
						trans->transid, ins.objectid);
			}
			path->leave_spinning = 0;
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (old_disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						old_disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						leaf_start, root_owner,
						root_gen, key.objectid, 0);
				*hint_byte = old_disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_check_file(root, inode);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 extent_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	u64 orig_parent;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;
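	/*
	 * look up the extent item covering the split point; when continuing
	 * after a previous split we search at split - 1 so the search lands
	 * inside the extent rather than on the following item
	 */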
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
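		/*
		 * the whole preallocated extent is being written: convert it
		 * in place and, where the neighbouring extents point at the
		 * same disk bytenr, merge with them and drop the now
		 * redundant extent reference
		 */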
		u64 leaf_owner = btrfs_header_owner(leaf);
		u64 leaf_gen = btrfs_header_generation(leaf);
		int del_slot = 0;
		int del_nr = 0;

		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		goto release;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
		extent_offset += split - key.offset;
	} else {
		BUG_ON(key.offset != start);
		btrfs_set_file_extent_offset(leaf, fi, extent_offset +
					     split - key.offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		key.offset = split;
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1 , inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);

	orig_parent = leaf->start;
	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				   orig_parent, root->root_key.objectid,
				   trans->transid, inode->i_ino);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, extent_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);

	if (orig_parent != leaf->start) {
		ret = btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
					      orig_parent, leaf->start,
					      root->root_key.objectid,
					      trans->transid, inode->i_ino);
	}
done:
	btrfs_mark_buffer_dirty(leaf);

release:
	btrfs_release_path(root, path);
	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
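	/*
	 * last_pos is the first byte past the pages being locked; any
	 * ordered extent overlapping [start_pos, last_pos) has to finish
	 * before these pages may be dirtied again
	 */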
	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
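	/*
	 * limit the scratch array of page pointers to what fits in a single
	 * page, so a huge write does not allocate an unbounded pointer array
	 */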
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	BTRFS_I(inode)->sequence++;
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					(size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;
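		/*
		 * e.g. on 4K pages, a 10000 byte chunk needs
		 * (10000 + 4095) >> 12 == 3 pages
		 */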
		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_data_free_space(root, inode, write_bytes);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_ALL);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;
	}
out:
	mutex_unlock(&inode->i_mutex);

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
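			/*
			 * try to log just this inode in the tree log; a full
			 * transaction commit is only forced when the inode
			 * can't be logged
			 */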
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates in
 * the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	filemap_fdatawrite(inode->i_mapping);
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
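	/*
	 * writeback has to finish first so the ordered extents have updated
	 * the metadata we are about to copy into the log tree
	 */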

	/*
	 * ok we haven't committed the transaction yet, let's do a commit
	 */
	if (file && file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_sync_log(trans, root);
		if (ret == 0)
			ret = btrfs_end_transaction(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? EIO : ret;
}

static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};