/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "compat.h"

static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
				struct page **prepared_pages,
				const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;
		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}

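/*
 * unlock and release a run of pages once the write path is done with them
 */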
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

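/*
 * write the data for a single inline extent directly into the leaf.
 * The item is extended in place when a neighbouring inline extent
 * already covers part of the range, otherwise a new item is inserted.
 */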
static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 offset, size_t size,
				struct page **pages, size_t page_offset,
				int num_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct page *page;
	u32 datasize;
	int err = 0;
	int ret;
	int i;
	ssize_t cur_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = offset;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}
	if (ret == 1) {
		struct btrfs_key found_key;

		if (path->slots[0] == 0)
			goto insert;

		path->slots[0]--;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid != inode->i_ino)
			goto insert;

		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto insert;
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) !=
		    BTRFS_FILE_EXTENT_INLINE) {
			goto insert;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		ret = 0;
	}
	if (ret == 0) {
		u32 found_size;
		u64 found_end;

		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, ei) !=
		    BTRFS_FILE_EXTENT_INLINE) {
			err = ret;
			btrfs_print_leaf(root, leaf);
			printk("found wasn't inline offset %Lu inode %lu\n",
			       offset, inode->i_ino);
			goto fail;
		}
		found_size = btrfs_file_extent_inline_len(leaf,
				  btrfs_item_nr(leaf, path->slots[0]));
		found_end = key.offset + found_size;

		if (found_end < offset + size) {
			btrfs_release_path(root, path);
			ret = btrfs_search_slot(trans, root, &key, path,
						offset + size - found_end, 1);
			BUG_ON(ret != 0);

			ret = btrfs_extend_item(trans, root, path,
						offset + size - found_end);
			if (ret) {
				err = ret;
				goto fail;
			}
			leaf = path->nodes[0];
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			inode->i_blocks += (offset + size - found_end) >> 9;
		}
		if (found_end < offset) {
			ptr = btrfs_file_extent_inline_start(ei) + found_size;
			memset_extent_buffer(leaf, 0, ptr, offset - found_end);
		}
	} else {
insert:
		btrfs_release_path(root, path);
		datasize = offset + size - key.offset;
		inode->i_blocks += datasize >> 9;
		datasize = btrfs_file_extent_calc_inline_size(datasize);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret) {
			err = ret;
			printk("got bad ret %d\n", ret);
			goto fail;
		}
		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, ei, trans->transid);
		btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	}
	ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;

	cur_size = size;
	i = 0;
	while (size > 0) {
		page = pages[i];
		kaddr = kmap_atomic(page, KM_USER0);
		cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
		write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
		kunmap_atomic(kaddr, KM_USER0);
		page_offset = 0;
		ptr += cur_size;
		size -= cur_size;
		if (i >= num_pages) {
			printk("i %d num_pages %d\n", i, num_pages);
		}
		i++;
	}
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_free_path(path);
	return err;
}

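/*
 * after the pages have been copied from userspace, record the new data
 * in the btree: small files may go inline, everything else is marked
 * delalloc so the extents are allocated at writeback time.
 */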
static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	u64 inline_size;
	int did_inline = 0;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
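	/*
	 * example of the alignment math (assuming a 4096 byte sectorsize):
	 * for pos = 5000 and write_bytes = 100, start_pos rounds down to
	 * 4096 and num_bytes rounds the dirtied length back up to 4096,
	 * making end_of_last_block below 8191.
	 */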
	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;

	if ((end_of_last_block & 4095) == 0) {
		printk("strange end of last %Lu %zu %Lu\n", start_pos,
		       write_bytes, end_of_last_block);
	}
	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* FIXME...EIEIO, ENOSPC and more */
	/* insert any holes we need to create */
	if (isize < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->sectorsize - 1;

		last_pos_in_file = (isize + mask) & ~mask;
		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
		if (hole_size > 0) {
			btrfs_wait_ordered_range(inode, last_pos_in_file,
						 last_pos_in_file + hole_size);
			mutex_lock(&BTRFS_I(inode)->extent_mutex);
			err = btrfs_drop_extents(trans, root, inode,
						 last_pos_in_file,
						 last_pos_in_file + hole_size,
						 last_pos_in_file,
						 &hint_byte);
			if (err) {
				mutex_unlock(&BTRFS_I(inode)->extent_mutex);
				goto failed;
			}
			err = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       last_pos_in_file,
						       0, 0, hole_size, 0);
			btrfs_drop_extent_cache(inode, last_pos_in_file,
					last_pos_in_file + hole_size - 1);
			mutex_unlock(&BTRFS_I(inode)->extent_mutex);
			btrfs_check_file(root, inode);
		}
		if (err)
			goto failed;
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
	inline_size = end_pos;
	if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    inline_size > root->fs_info->max_inline ||
	    (inline_size & (root->sectorsize - 1)) == 0 ||
	    inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		/* check for reserved extents on each page, we don't want
		 * to reset the delalloc bit on things that already have
		 * extents reserved.
		 */
		set_extent_delalloc(io_tree, start_pos,
				    end_of_last_block, GFP_NOFS);
		for (i = 0; i < num_pages; i++) {
			struct page *p = pages[i];
			SetPageUptodate(p);
			set_page_dirty(p);
		}
	} else {
		u64 aligned_end;
		/* step one, delete the existing extents in this range */
		aligned_end = (pos + write_bytes + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		mutex_lock(&BTRFS_I(inode)->extent_mutex);
		err = btrfs_drop_extents(trans, root, inode, start_pos,
					 aligned_end, aligned_end, &hint_byte);
		if (err) {
			mutex_unlock(&BTRFS_I(inode)->extent_mutex);
			goto failed;
		}
		if (isize > inline_size)
			inline_size = min_t(u64, isize, aligned_end);
		inline_size -= start_pos;
		err = insert_inline_extent(trans, root, inode, start_pos,
					   inline_size, pages, 0, num_pages);
		btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
		did_inline = 1;
		mutex_unlock(&BTRFS_I(inode)->extent_mutex);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		if (did_inline)
			BTRFS_I(inode)->disk_i_size = end_pos;
		btrfs_update_inode(trans, root, inode);
	}
failed:
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}

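/*
 * drop any cached extent_map records overlapping [start, end].  A
 * mapping that sticks out past either edge of the range is split so
 * the portions outside the range stay cached.
 */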
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while(1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		if (test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			printk(KERN_CRIT "inode %lu trying to drop pinned "
			       "extent start %llu end %llu, em [%llu %llu]\n",
			       inode->i_ino,
			       (unsigned long long)start,
			       (unsigned long long)end,
			       (unsigned long long)em->start,
			       (unsigned long long)em->len);
		}
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->block_start = em->block_start;
			split->bdev = em->bdev;
			split->flags = em->flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = em->flags;

			split->block_start = em->block_start + diff;

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

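/*
 * debugging helper: walk the EXTENT_DATA items for this inode and
 * verify their offsets never move backwards, i.e. the extents are in
 * key order and do not overlap.
 */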
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	int ret = 0;
	int err = 0;
	u64 last_offset = 0;
	u64 extent_end = 0;
	int nritems;
	int slot;
	int found_type;
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while(1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;

		if (found_key.offset < last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk("inode %lu found offset %Lu expected %Lu\n",
			       inode->i_ino, found_key.offset, last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, item);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize - 1);
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	if (0 && last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
		       last_offset, inode->i_size);
		err = 1;
	}
out:
	btrfs_free_path(path);
	return err;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
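/*
 * Example: dropping the range [4096, 8192) from a single extent that
 * covers [0, 12288) truncates the existing item down to [0, 4096) and
 * then inserts a bookend item for [8192, 12288) that points into the
 * same disk extent.
 */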
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while(1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = key.offset;
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				struct btrfs_item *item;
				item = btrfs_item_nr(leaf, slot);
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, item);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;
		if (end <= extent_end && start >= key.offset && found_inline) {
			*hint_byte = EXTENT_MAP_INLINE;
		}
		if (end < extent_end && end >= key.offset) {
			if (found_extent) {
				u64 disk_bytenr =
				    btrfs_file_extent_disk_bytenr(leaf, extent);
				u64 disk_num_bytes =
				    btrfs_file_extent_disk_num_bytes(leaf,
								     extent);
				read_extent_buffer(leaf, &old,
						   (unsigned long)extent,
						   sizeof(old));
				if (disk_bytenr != 0) {
					ret = btrfs_inc_extent_ref(trans, root,
						  disk_bytenr, disk_num_bytes,
						  root->root_key.objectid,
						  trans->transid,
						  inode->i_ino, key.offset);
					BUG_ON(ret);
				}
			}
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}
		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					dec_i_blocks(inode, old_num - new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf, extent,
								new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				dec_i_blocks(inode, (extent_end - key.offset) -
					(inline_limit - key.offset));
				btrfs_truncate_item(trans, root, path,
						    new_size, 1);
			}
		}
		/* delete the entire extent */
		if (!keep) {
			u64 disk_bytenr = 0;
			u64 disk_num_bytes = 0;
			u64 extent_num_bytes = 0;
			u64 root_gen;
			u64 root_owner;

			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			if (found_extent) {
				disk_bytenr =
				      btrfs_file_extent_disk_bytenr(leaf,
								    extent);
				disk_num_bytes =
				      btrfs_file_extent_disk_num_bytes(leaf,
								       extent);
				extent_num_bytes =
				      btrfs_file_extent_num_bytes(leaf, extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
			}
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			btrfs_release_path(root, path);
			extent = NULL;
			if (found_extent && disk_bytenr != 0) {
				dec_i_blocks(inode, extent_num_bytes);
				ret = btrfs_free_extent(trans, root,
							disk_bytenr,
							disk_num_bytes,
							root_owner,
							root_gen, inode->i_ino,
							key.offset, 0);
			}

			BUG_ON(ret);
			if (!bookend && search_start >= end) {
				ret = 0;
				goto out;
			}
			if (!bookend)
				continue;
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			dec_i_blocks(inode, (extent_end - key.offset) -
					(extent_end - end));
			btrfs_truncate_item(trans, root, path, new_size, 0);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));

			leaf = path->nodes[0];
			if (ret) {
				btrfs_print_leaf(root, leaf);
				printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu keep was %d\n", ret, ins.objectid, ins.type, ins.offset, start, end, key.offset, extent_end, keep);
			}
			BUG_ON(ret);
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);
			btrfs_set_file_extent_type(leaf, extent,
						   BTRFS_FILE_EXTENT_REG);

			btrfs_mark_buffer_dirty(path->nodes[0]);
			if (le64_to_cpu(old.disk_bytenr) != 0) {
				inode->i_blocks +=
				      btrfs_file_extent_num_bytes(leaf,
								  extent) >> 9;
			}
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	btrfs_check_file(root, inode);
	return ret;
}

/*
 * this gets pages into the page cache and locks them down
 */
static int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;

		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode, last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
		ClearPageDirty(pages[i]);
#else
		cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
#endif
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}

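/*
 * the main write entry point: data is copied from userspace in chunks
 * of at most nrptrs pages, and each chunk is handed to
 * dirty_and_release_pages() under a short lived transaction.
 */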
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
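	/*
	 * e.g. with 4096 byte pages and 8 byte pointers this caps nrptrs
	 * at 512, so each pass of the loop below writes at most 2MB.
	 */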
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;
#ifdef REMOVE_SUID_PATH
	err = remove_suid(&file->f_path);
#else
	err = remove_suid(fdentry(file));
#endif
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * if this is a nodatasum mount, force summing off for the inode
	 * all the time.  That way a later mount with summing on won't
	 * get confused
	 */
	if (btrfs_test_opt(root, NODATASUM))
		btrfs_set_flag(inode, NODATASUM);

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while(count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * num_pages);

		ret = btrfs_check_free_space(root, write_bytes, 0);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret)
			goto out;

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret)
			goto out;

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
		if (num_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);
		btrfs_throttle(root);
		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	if (num_written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		err = sync_page_range(inode, inode->i_mapping,
				      start_pos, num_written);
		if (err < 0)
			num_written = err;
	} else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
		do_sync_file_range(file, start_pos,
				   start_pos + num_written - 1,
				   SYNC_FILE_RANGE_WRITE |
				   SYNC_FILE_RANGE_WAIT_AFTER);
#else
		do_sync_mapping_range(inode->i_mapping, start_pos,
				      start_pos + num_written - 1,
				      SYNC_FILE_RANGE_WRITE |
				      SYNC_FILE_RANGE_WAIT_AFTER);
#endif
		invalidate_mapping_pages(inode->i_mapping,
			start_pos >> PAGE_CACHE_SHIFT,
			(start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

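/*
 * fsync/fdatasync entry point: if the transaction that last changed
 * this inode has already committed there is nothing to do, otherwise
 * commit the running transaction.
 */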
static int btrfs_sync_file(struct file *file,
			   struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_commit_transaction(trans, root);
out:
	return ret > 0 ? -EIO : ret;
}

static struct vm_operations_struct btrfs_file_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
#else
	.fault		= filemap_fault,
#endif
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	.sendfile	= generic_file_sendfile,
#endif
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};