/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/version.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
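
/*
 * Copy user data into the set of locked, prepared pages, starting at
 * the page offset implied by pos.  Returns -EFAULT if any byte could
 * not be copied in from the user buffer.
 */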
static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
				struct page **prepared_pages,
				const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
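
/*
 * Unlock and release the pages pinned by prepare_pages(); safe on a
 * partially filled array, since it stops at the first empty slot.
 */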
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	int i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
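
/*
 * Store the contents of a single page directly in the btree as an
 * inline file extent item.  The data must be smaller than one page.
 */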
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 offset, ssize_t size,
				struct page *page, size_t page_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	u32 datasize;
	int err = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = offset;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	BUG_ON(size >= PAGE_CACHE_SIZE);
	datasize = btrfs_file_extent_calc_inline_size(size);

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	ptr = btrfs_file_extent_inline_start(ei);

	kaddr = kmap_atomic(page, KM_USER0);
	write_extent_buffer(leaf, kaddr + page_offset, ptr, size);
	kunmap_atomic(kaddr, KM_USER0);
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_free_path(path);
	return err;
}
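
/*
 * After data has been copied into the prepared pages, record the new
 * bytes in the btree: either mark the range delalloc for later
 * allocation, or write small files as a single inline extent.  Any
 * hole between the old i_size and the start of this write is filled
 * with a zero-length file extent first.
 */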
static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 hint_block;
	u64 num_blocks;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;

	em->bdev = inode->i_sb->s_bdev;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_blocks = (write_bytes + pos - start_pos + root->sectorsize - 1) >>
			inode->i_blkbits;

	down_read(&BTRFS_I(inode)->root->snap_sem);
	end_of_last_block = start_pos + (num_blocks << inode->i_blkbits) - 1;
	lock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	inode->i_blocks += num_blocks << 3;
	hint_block = 0;

	if ((end_of_last_block & 4095) == 0) {
		printk("strange end of last %Lu %zu %Lu\n", start_pos,
		       write_bytes, end_of_last_block);
	}
	set_extent_uptodate(em_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* FIXME...EIEIO, ENOSPC and more */

	/* insert any holes we need to create */
	if (inode->i_size < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->sectorsize - 1;
		last_pos_in_file = (isize + mask) & ~mask;
		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;

		if (last_pos_in_file < start_pos) {
			err = btrfs_drop_extents(trans, root, inode,
						 last_pos_in_file,
						 last_pos_in_file + hole_size,
						 &hint_block);
			if (err)
				goto failed;

			hole_size >>= inode->i_blkbits;
			err = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       last_pos_in_file,
						       0, 0, hole_size);
		}
		if (err)
			goto failed;
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
	if (isize >= PAGE_CACHE_SIZE || pos + write_bytes < inode->i_size ||
	    pos + write_bytes - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		u64 last_end;
		for (i = 0; i < num_pages; i++) {
			struct page *p = pages[i];
			SetPageUptodate(p);
			set_page_dirty(p);
		}
		last_end = (u64)pages[num_pages - 1]->index << PAGE_CACHE_SHIFT;
		last_end += PAGE_CACHE_SIZE - 1;
		set_extent_delalloc(em_tree, start_pos, end_of_last_block,
				    GFP_NOFS);
	} else {
		struct page *p = pages[0];
		/* step one, delete the existing extents in this range */
		/* FIXME blocksize != pagesize */
		err = btrfs_drop_extents(trans, root, inode, start_pos,
					 (pos + write_bytes + root->sectorsize - 1) &
					 ~((u64)root->sectorsize - 1),
					 &hint_block);
		if (err)
			goto failed;

		err = insert_inline_extent(trans, root, inode, start_pos,
					   end_pos - start_pos, p, 0);

		em->start = start_pos;
		em->end = end_pos - 1;
		em->block_start = EXTENT_MAP_INLINE;
		em->block_end = EXTENT_MAP_INLINE;
		add_extent_mapping(em_tree, em);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
failed:
	err = btrfs_end_transaction(trans, root);
out_unlock:
	mutex_unlock(&root->fs_info->fs_mutex);
	unlock_extent(em_tree, start_pos, end_of_last_block, GFP_NOFS);
	free_extent_map(em);
	up_read(&BTRFS_I(inode)->root->snap_sem);
	return err;
}
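
/*
 * Drop any cached extent mappings that overlap [start, end] so the
 * next read rebuilds them from the btree.
 */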
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
{
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;

	while (1) {
		em = lookup_extent_mapping(em_tree, start, end);
		if (!em)
			break;
		remove_extent_mapping(em_tree, em);
		free_extent_map(em);	/* once for us */
		free_extent_map(em);	/* once for the tree */
	}
	return 0;
}
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 *hint_block)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_file_extent_item *extent;
	u64 extent_end = 0;
	int keep;
	struct btrfs_file_extent_item old;
	struct btrfs_path *path;
	u64 search_start = start;
	int bookend;
	int found_type;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.offset >= end || key.objectid != inode->i_ino) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY) {
			goto out;
		}
		if (recow) {
			search_start = key.offset;
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				extent_end = key.offset +
				     (btrfs_file_extent_num_blocks(leaf, extent) <<
				      inode->i_blkbits);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				struct btrfs_item *item;
				item = btrfs_item_nr(leaf, slot);
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, item);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		/* FIXME, there's only one inline extent allowed right now */
		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		if (end < extent_end && end >= key.offset) {
			if (found_extent) {
				u64 disk_blocknr =
					btrfs_file_extent_disk_blocknr(leaf, extent);
				u64 disk_num_blocks =
					btrfs_file_extent_disk_num_blocks(leaf,
									  extent);
				read_extent_buffer(leaf, &old,
						   (unsigned long)extent,
						   sizeof(old));
				if (disk_blocknr != 0) {
					ret = btrfs_inc_extent_ref(trans, root,
						  disk_blocknr, disk_num_blocks);
					BUG_ON(ret);
				}
			}
			WARN_ON(found_inline);
			bookend = 1;
		}

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = (start - key.offset) >>
					inode->i_blkbits;
				old_num = btrfs_file_extent_num_blocks(leaf,
								       extent);
				*hint_block =
					btrfs_file_extent_disk_blocknr(leaf,
								       extent);
				if (btrfs_file_extent_disk_blocknr(leaf,
								   extent)) {
					inode->i_blocks -=
						(old_num - new_num) << 3;
				}
				btrfs_set_file_extent_num_blocks(leaf, extent,
								 new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				WARN_ON(1);
			}
		}
		/* delete the entire extent */
		if (!keep) {
			u64 disk_blocknr = 0;
			u64 disk_num_blocks = 0;
			u64 extent_num_blocks = 0;
			if (found_extent) {
				disk_blocknr =
					btrfs_file_extent_disk_blocknr(leaf,
								       extent);
				disk_num_blocks =
					btrfs_file_extent_disk_num_blocks(leaf,
									  extent);
				extent_num_blocks =
					btrfs_file_extent_num_blocks(leaf,
								     extent);
				*hint_block =
					btrfs_file_extent_disk_blocknr(leaf,
								       extent);
			}
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			if (found_extent && disk_blocknr != 0) {
				inode->i_blocks -= extent_num_blocks << 3;
				ret = btrfs_free_extent(trans, root,
							disk_blocknr,
							disk_num_blocks, 0);
			}
			BUG_ON(ret);
		}
		if (!bookend && search_start >= end) {
			ret = 0;
			goto out;
		}
		if (!bookend)
			continue;

		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));

			leaf = path->nodes[0];
			if (ret) {
				btrfs_print_leaf(root, leaf);
				printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu keep was %d\n",
				       ret, ins.objectid, ins.type, ins.offset,
				       start, end, key.offset, extent_end, keep);
			}
			BUG_ON(ret);
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) +
				    ((end - key.offset) >> inode->i_blkbits));
			WARN_ON(le64_to_cpu(old.num_blocks) <
				(extent_end - end) >> inode->i_blkbits);
			btrfs_set_file_extent_num_blocks(leaf, extent,
					(extent_end - end) >> inode->i_blkbits);
			btrfs_set_file_extent_type(leaf, extent,
						   BTRFS_FILE_EXTENT_REG);

			btrfs_mark_buffer_dirty(path->nodes[0]);
			if (le64_to_cpu(old.disk_blocknr) != 0) {
				inode->i_blocks +=
					btrfs_file_extent_num_blocks(leaf,
								     extent) << 3;
			}
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * this gets pages into the page cache and locks them down
 */
static int prepare_pages(struct btrfs_root *root,
			 struct file *file,
			 struct page **pages,
			 size_t num_pages,
			 loff_t pos,
			 unsigned long first_index,
			 unsigned long last_index,
			 size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = file->f_path.dentry->d_inode;
	int err = 0;
	u64 num_blocks;
	u64 start_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_blocks = (write_bytes + pos - start_pos + root->sectorsize - 1) >>
		inode->i_blkbits;

	memset(pages, 0, num_pages * sizeof(struct page *));

	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
		wait_on_page_writeback(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
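
/*
 * btrfs_file_write is the top level write routine: it validates and
 * clamps the request, pins the first and last page of the range so
 * they are up to date for cow, then loops preparing batches of pages,
 * copying user data into them and recording the new bytes through
 * dirty_and_release_pages().
 */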
static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	size_t num_written = 0;
	int err = 0;
	int ret = 0;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;
	if (file->f_flags & O_DIRECT)
		return -EINVAL;
	pos = *ppos;
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;
	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out_nolock;
	file_update_time(file);

	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto out_nolock;
	}

	mutex_lock(&inode->i_mutex);
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					 (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, num_pages * sizeof(struct page *));
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret)
			goto out;

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret)
			goto out;

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
		btrfs_btree_balance_dirty(root, 1);
	}
out:
	mutex_unlock(&inode->i_mutex);
out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
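
/*
 * fsync: if the transaction that last changed this inode has already
 * been committed there is nothing to do, otherwise commit the running
 * transaction.
 */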
static int btrfs_sync_file(struct file *file,
			   struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	mutex_lock(&root->fs_info->fs_mutex);
	if (!BTRFS_I(inode)->last_trans)
		goto out;
	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}
	ret = btrfs_commit_transaction(trans, root);
out:
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret > 0 ? -EIO : ret;
}
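
/*
 * Kernels older than 2.6.23 use the nopage/populate callbacks for
 * file-backed mappings; 2.6.23 and later use ->fault instead.
 */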
static struct vm_operations_struct btrfs_file_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
#else
	.fault		= filemap_fault,
#endif
	.page_mkwrite	= btrfs_page_mkwrite,
};
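
/*
 * Wire up the vm operations for mmap; write faults go through
 * btrfs_page_mkwrite so the page can be prepared for writing.
 */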
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	return 0;
}
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};