1 #include <linux/module.h>
2 #include <linux/buffer_head.h>
4 #include <linux/pagemap.h>
5 #include <linux/highmem.h>
6 #include <linux/time.h>
7 #include <linux/init.h>
8 #include <linux/string.h>
9 #include <linux/smp_lock.h>
10 #include <linux/backing-dev.h>
11 #include <linux/mpage.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/statfs.h>
17 #include "transaction.h"
18 #include "btrfs_inode.h"
20 #include "print-tree.h"
22 struct btrfs_iget_args {
24 struct btrfs_root *root;
27 #define BTRFS_SUPER_MAGIC 0x9123682E
29 static struct inode_operations btrfs_dir_inode_operations;
30 static struct inode_operations btrfs_symlink_inode_operations;
31 static struct inode_operations btrfs_dir_ro_inode_operations;
32 static struct super_operations btrfs_super_ops;
33 static struct file_operations btrfs_dir_file_operations;
34 static struct inode_operations btrfs_file_inode_operations;
35 static struct address_space_operations btrfs_aops;
36 static struct address_space_operations btrfs_symlink_aops;
37 static struct file_operations btrfs_file_operations;
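/*
 * Read the on-disk inode item for an inode freshly returned by
 * iget5_locked(): copy mode, uid/gid, size, timestamps, link and block
 * counts into the VFS inode, remember which block group it lives in, and
 * pick the inode/file/address_space operations based on i_mode.
 */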
39 static void btrfs_read_locked_inode(struct inode *inode)
41 struct btrfs_path *path;
42 struct btrfs_inode_item *inode_item;
43 struct btrfs_root *root = BTRFS_I(inode)->root;
44 struct btrfs_key location;
45 struct btrfs_block_group_cache *alloc_group;
46 u64 alloc_group_block;
49 path = btrfs_alloc_path();
51 btrfs_init_path(path);
52 mutex_lock(&root->fs_info->fs_mutex);
54 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
55 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
57 btrfs_free_path(path);
60 inode_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
62 struct btrfs_inode_item);
64 inode->i_mode = btrfs_inode_mode(inode_item);
65 inode->i_nlink = btrfs_inode_nlink(inode_item);
66 inode->i_uid = btrfs_inode_uid(inode_item);
67 inode->i_gid = btrfs_inode_gid(inode_item);
68 inode->i_size = btrfs_inode_size(inode_item);
69 inode->i_atime.tv_sec = btrfs_timespec_sec(&inode_item->atime);
70 inode->i_atime.tv_nsec = btrfs_timespec_nsec(&inode_item->atime);
71 inode->i_mtime.tv_sec = btrfs_timespec_sec(&inode_item->mtime);
72 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(&inode_item->mtime);
73 inode->i_ctime.tv_sec = btrfs_timespec_sec(&inode_item->ctime);
74 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(&inode_item->ctime);
75 inode->i_blocks = btrfs_inode_nblocks(inode_item);
76 inode->i_generation = btrfs_inode_generation(inode_item);
77 alloc_group_block = btrfs_inode_block_group(inode_item);
78 ret = radix_tree_gang_lookup(&root->fs_info->block_group_radix,
79 (void **)&alloc_group,
80 alloc_group_block, 1);
82 BTRFS_I(inode)->block_group = alloc_group;
84 btrfs_free_path(path);
87 mutex_unlock(&root->fs_info->fs_mutex);
89 switch (inode->i_mode & S_IFMT) {
92 init_special_inode(inode, inode->i_mode,
93 btrfs_inode_rdev(inode_item));
97 inode->i_mapping->a_ops = &btrfs_aops;
98 inode->i_fop = &btrfs_file_operations;
99 inode->i_op = &btrfs_file_inode_operations;
102 inode->i_fop = &btrfs_dir_file_operations;
103 if (root == root->fs_info->tree_root)
104 inode->i_op = &btrfs_dir_ro_inode_operations;
106 inode->i_op = &btrfs_dir_inode_operations;
109 inode->i_op = &btrfs_symlink_inode_operations;
110 inode->i_mapping->a_ops = &btrfs_symlink_aops;
116 btrfs_release_path(root, path);
117 btrfs_free_path(path);
118 mutex_unlock(&root->fs_info->fs_mutex);
119 make_bad_inode(inode);
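/*
 * Copy the fields of an in-memory VFS inode into an on-disk
 * btrfs_inode_item (the inverse of btrfs_read_locked_inode).
 */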
122 static void fill_inode_item(struct btrfs_inode_item *item,
125 btrfs_set_inode_uid(item, inode->i_uid);
126 btrfs_set_inode_gid(item, inode->i_gid);
127 btrfs_set_inode_size(item, inode->i_size);
128 btrfs_set_inode_mode(item, inode->i_mode);
129 btrfs_set_inode_nlink(item, inode->i_nlink);
130 btrfs_set_timespec_sec(&item->atime, inode->i_atime.tv_sec);
131 btrfs_set_timespec_nsec(&item->atime, inode->i_atime.tv_nsec);
132 btrfs_set_timespec_sec(&item->mtime, inode->i_mtime.tv_sec);
133 btrfs_set_timespec_nsec(&item->mtime, inode->i_mtime.tv_nsec);
134 btrfs_set_timespec_sec(&item->ctime, inode->i_ctime.tv_sec);
135 btrfs_set_timespec_nsec(&item->ctime, inode->i_ctime.tv_nsec);
136 btrfs_set_inode_nblocks(item, inode->i_blocks);
137 btrfs_set_inode_generation(item, inode->i_generation);
138 btrfs_set_inode_block_group(item,
139 BTRFS_I(inode)->block_group->key.objectid);
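/*
 * Find the inode item in the btree and rewrite it from the in-memory
 * inode.  Callers hold fs_mutex and pass an open transaction.
 */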
142 static int btrfs_update_inode(struct btrfs_trans_handle *trans,
143 struct btrfs_root *root,
146 struct btrfs_inode_item *inode_item;
147 struct btrfs_path *path;
150 path = btrfs_alloc_path();
152 btrfs_init_path(path);
153 ret = btrfs_lookup_inode(trans, root, path,
154 &BTRFS_I(inode)->location, 1);
161 inode_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
163 struct btrfs_inode_item);
165 fill_inode_item(inode_item, inode);
166 btrfs_mark_buffer_dirty(path->nodes[0]);
169 btrfs_release_path(root, path);
170 btrfs_free_path(path);
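/*
 * Unlink helper shared by unlink and rmdir: remove both the dir item and
 * the matching dir index item for this name, shrink the directory size,
 * update ctime and drop the victim's link count, all inside the caller's
 * transaction.
 */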
175 static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
176 struct btrfs_root *root,
178 struct dentry *dentry)
180 struct btrfs_path *path;
181 const char *name = dentry->d_name.name;
182 int name_len = dentry->d_name.len;
185 struct btrfs_dir_item *di;
187 path = btrfs_alloc_path();
189 btrfs_init_path(path);
190 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
200 objectid = btrfs_disk_key_objectid(&di->location);
201 ret = btrfs_delete_one_dir_name(trans, root, path, di);
203 btrfs_release_path(root, path);
205 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
206 objectid, name, name_len, -1);
215 ret = btrfs_delete_one_dir_name(trans, root, path, di);
218 dentry->d_inode->i_ctime = dir->i_ctime;
220 btrfs_free_path(path);
222 dir->i_size -= name_len * 2;
223 btrfs_update_inode(trans, root, dir);
224 drop_nlink(dentry->d_inode);
225 btrfs_update_inode(trans, root, dentry->d_inode);
226 dir->i_sb->s_dirt = 1;
231 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
233 struct btrfs_root *root;
234 struct btrfs_trans_handle *trans;
237 root = BTRFS_I(dir)->root;
238 mutex_lock(&root->fs_info->fs_mutex);
239 trans = btrfs_start_transaction(root, 1);
240 btrfs_set_trans_block_group(trans, dir);
241 ret = btrfs_unlink_trans(trans, root, dir, dentry);
242 btrfs_end_transaction(trans, root);
243 mutex_unlock(&root->fs_info->fs_mutex);
244 btrfs_btree_balance_dirty(root);
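/*
 * rmdir: walk the directory's items from the highest key downwards,
 * deleting them as long as only the "." and ".." entries remain, then
 * unlink the empty directory itself via btrfs_unlink_trans().
 */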
248 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
250 struct inode *inode = dentry->d_inode;
253 struct btrfs_root *root = BTRFS_I(dir)->root;
254 struct btrfs_path *path;
255 struct btrfs_key key;
256 struct btrfs_trans_handle *trans;
257 struct btrfs_key found_key;
259 struct btrfs_leaf *leaf;
260 const char *goodnames = "..";
262 path = btrfs_alloc_path();
264 btrfs_init_path(path);
265 mutex_lock(&root->fs_info->fs_mutex);
266 trans = btrfs_start_transaction(root, 1);
267 btrfs_set_trans_block_group(trans, dir);
268 key.objectid = inode->i_ino;
269 key.offset = (u64)-1;
272 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
278 if (path->slots[0] == 0) {
283 leaf = btrfs_buffer_leaf(path->nodes[0]);
284 btrfs_disk_key_to_cpu(&found_key,
285 &leaf->items[path->slots[0]].key);
286 found_type = btrfs_key_type(&found_key);
287 if (found_key.objectid != inode->i_ino) {
291 if ((found_type != BTRFS_DIR_ITEM_KEY &&
292 found_type != BTRFS_DIR_INDEX_KEY) ||
293 (!btrfs_match_dir_item_name(root, path, goodnames, 2) &&
294 !btrfs_match_dir_item_name(root, path, goodnames, 1))) {
298 ret = btrfs_del_item(trans, root, path);
301 if (found_type == BTRFS_DIR_ITEM_KEY && found_key.offset == 1)
303 btrfs_release_path(root, path);
306 btrfs_release_path(root, path);
308 /* now the directory is empty */
309 err = btrfs_unlink_trans(trans, root, dir, dentry);
314 btrfs_release_path(root, path);
315 btrfs_free_path(path);
316 mutex_unlock(&root->fs_info->fs_mutex);
317 ret = btrfs_end_transaction(trans, root);
318 btrfs_btree_balance_dirty(root);
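/* Remove the inode item itself from the tree (last step of inode deletion). */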
324 static int btrfs_free_inode(struct btrfs_trans_handle *trans,
325 struct btrfs_root *root,
328 struct btrfs_path *path;
333 path = btrfs_alloc_path();
335 btrfs_init_path(path);
336 ret = btrfs_lookup_inode(trans, root, path,
337 &BTRFS_I(inode)->location, -1);
339 ret = btrfs_del_item(trans, root, path);
341 btrfs_free_path(path);
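/*
 * Readahead for truncate: kick off async reads of the tree blocks to the
 * left of the current slot in the parent node that still belong to this
 * inode, since truncation deletes items back to front.
 */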
345 static void reada_truncate(struct btrfs_root *root, struct btrfs_path *path,
348 struct btrfs_node *node;
358 node = btrfs_buffer_node(path->nodes[1]);
359 slot = path->slots[1];
362 nritems = btrfs_header_nritems(&node->header);
363 for (i = slot - 1; i >= 0; i--) {
364 item_objectid = btrfs_disk_key_objectid(&node->ptrs[i].key);
365 if (item_objectid != objectid)
367 blocknr = btrfs_node_blockptr(node, i);
368 ret = readahead_tree_block(root, blocknr);
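/*
 * Walk the inode's items from the highest key backwards and delete the
 * csum, dir and extent-data items that lie at or beyond i_size, freeing
 * any disk extents they referenced.
 */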
374 static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
375 struct btrfs_root *root,
379 struct btrfs_path *path;
380 struct btrfs_key key;
381 struct btrfs_disk_key *found_key;
383 struct btrfs_leaf *leaf;
384 struct btrfs_file_extent_item *fi = NULL;
385 u64 extent_start = 0;
386 u64 extent_num_blocks = 0;
389 path = btrfs_alloc_path();
391 /* FIXME, add redo link to tree so we don't leak on crash */
392 key.objectid = inode->i_ino;
393 key.offset = (u64)-1;
396 btrfs_init_path(path);
397 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
402 BUG_ON(path->slots[0] == 0);
405 reada_truncate(root, path, inode->i_ino);
406 leaf = btrfs_buffer_leaf(path->nodes[0]);
407 found_key = &leaf->items[path->slots[0]].key;
408 found_type = btrfs_disk_key_type(found_key);
409 if (btrfs_disk_key_objectid(found_key) != inode->i_ino)
411 if (found_type != BTRFS_CSUM_ITEM_KEY &&
412 found_type != BTRFS_DIR_ITEM_KEY &&
413 found_type != BTRFS_DIR_INDEX_KEY &&
414 found_type != BTRFS_EXTENT_DATA_KEY)
416 if (btrfs_disk_key_offset(found_key) < inode->i_size)
419 if (btrfs_disk_key_type(found_key) == BTRFS_EXTENT_DATA_KEY) {
420 fi = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
422 struct btrfs_file_extent_item);
423 if (btrfs_file_extent_type(fi) !=
424 BTRFS_FILE_EXTENT_INLINE) {
427 btrfs_file_extent_disk_blocknr(fi);
429 btrfs_file_extent_disk_num_blocks(fi);
430 /* FIXME blocksize != 4096 */
431 num_dec = btrfs_file_extent_num_blocks(fi) << 3;
432 if (extent_start != 0) {
434 inode->i_blocks -= num_dec;
438 ret = btrfs_del_item(trans, root, path);
440 btrfs_release_path(root, path);
442 ret = btrfs_free_extent(trans, root, extent_start,
443 extent_num_blocks, 0);
449 btrfs_release_path(root, path);
450 btrfs_free_path(path);
451 inode->i_sb->s_dirt = 1;
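/*
 * setattr: when a regular file grows, insert a zero-length file extent (a
 * hole) covering the gap between the old block-aligned EOF and the new
 * size before letting inode_setattr() apply the change.
 */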
455 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
457 struct inode *inode = dentry->d_inode;
460 err = inode_change_ok(inode, attr);
464 if (S_ISREG(inode->i_mode) &&
465 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
466 struct btrfs_trans_handle *trans;
467 struct btrfs_root *root = BTRFS_I(inode)->root;
468 u64 mask = root->blocksize - 1;
469 u64 pos = (inode->i_size + mask) & ~mask;
472 if (attr->ia_size < pos)
474 hole_size = (attr->ia_size - pos + mask) & ~mask;
475 hole_size >>= inode->i_blkbits;
477 mutex_lock(&root->fs_info->fs_mutex);
478 trans = btrfs_start_transaction(root, 1);
479 btrfs_set_trans_block_group(trans, inode);
480 err = btrfs_insert_file_extent(trans, root, inode->i_ino,
481 pos, 0, 0, hole_size);
483 btrfs_end_transaction(trans, root);
484 mutex_unlock(&root->fs_info->fs_mutex);
487 err = inode_setattr(inode, attr);
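/*
 * Final iput of an unlinked inode: drop its page cache, truncate away all
 * of its items and then delete the inode item, in one transaction.
 */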
492 static void btrfs_delete_inode(struct inode *inode)
494 struct btrfs_trans_handle *trans;
495 struct btrfs_root *root = BTRFS_I(inode)->root;
498 truncate_inode_pages(&inode->i_data, 0);
499 if (is_bad_inode(inode)) {
503 mutex_lock(&root->fs_info->fs_mutex);
504 trans = btrfs_start_transaction(root, 1);
505 btrfs_set_trans_block_group(trans, inode);
506 ret = btrfs_truncate_in_trans(trans, root, inode);
508 btrfs_free_inode(trans, root, inode);
509 btrfs_end_transaction(trans, root);
510 mutex_unlock(&root->fs_info->fs_mutex);
511 btrfs_btree_balance_dirty(root);
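/* Resolve a name in a directory to the btrfs_key stored in its dir item. */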
517 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
518 struct btrfs_key *location)
520 const char *name = dentry->d_name.name;
521 int namelen = dentry->d_name.len;
522 struct btrfs_dir_item *di;
523 struct btrfs_path *path;
524 struct btrfs_root *root = BTRFS_I(dir)->root;
527 path = btrfs_alloc_path();
529 btrfs_init_path(path);
530 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
532 if (!di || IS_ERR(di)) {
533 location->objectid = 0;
537 btrfs_disk_key_to_cpu(location, &di->location);
539 btrfs_release_path(root, path);
540 btrfs_free_path(path);
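/*
 * If a lookup returned a ROOT_ITEM key, the entry points at a subvolume:
 * read that root and redirect the location to the subvolume's root
 * directory inode.
 */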
544 static int fixup_tree_root_location(struct btrfs_root *root,
545 struct btrfs_key *location,
546 struct btrfs_root **sub_root)
548 struct btrfs_path *path;
549 struct btrfs_root_item *ri;
551 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
553 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
556 path = btrfs_alloc_path();
558 mutex_lock(&root->fs_info->fs_mutex);
560 *sub_root = btrfs_read_fs_root(root->fs_info, location);
561 if (IS_ERR(*sub_root))
562 return PTR_ERR(*sub_root);
564 ri = &(*sub_root)->root_item;
565 location->objectid = btrfs_root_dirid(ri);
567 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
568 location->offset = 0;
570 btrfs_free_path(path);
571 mutex_unlock(&root->fs_info->fs_mutex);
575 static int btrfs_init_locked_inode(struct inode *inode, void *p)
577 struct btrfs_iget_args *args = p;
578 inode->i_ino = args->ino;
579 BTRFS_I(inode)->root = args->root;
583 static int btrfs_find_actor(struct inode *inode, void *opaque)
585 struct btrfs_iget_args *args = opaque;
586 return (args->ino == inode->i_ino &&
587 args->root == BTRFS_I(inode)->root);
590 static struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
591 struct btrfs_root *root)
594 struct btrfs_iget_args args;
598 inode = iget5_locked(s, objectid, btrfs_find_actor,
599 btrfs_init_locked_inode,
604 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
605 struct nameidata *nd)
607 struct inode *inode;
608 struct btrfs_inode *bi = BTRFS_I(dir);
609 struct btrfs_root *root = bi->root;
610 struct btrfs_root *sub_root = root;
611 struct btrfs_key location;
614 if (dentry->d_name.len > BTRFS_NAME_LEN)
615 return ERR_PTR(-ENAMETOOLONG);
616 mutex_lock(&root->fs_info->fs_mutex);
617 ret = btrfs_inode_by_name(dir, dentry, &location);
618 mutex_unlock(&root->fs_info->fs_mutex);
622 if (location.objectid) {
623 ret = fixup_tree_root_location(root, &location, &sub_root);
627 return ERR_PTR(-ENOENT);
628 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
631 return ERR_PTR(-EACCES);
632 if (inode->i_state & I_NEW) {
633 if (sub_root != root) {
634 printk("adding new root for inode %lu root %p (found %p)\n", inode->i_ino, sub_root, BTRFS_I(inode)->root);
636 sub_root->inode = inode;
638 BTRFS_I(inode)->root = sub_root;
639 memcpy(&BTRFS_I(inode)->location, &location,
641 btrfs_read_locked_inode(inode);
642 unlock_new_inode(inode);
645 return d_splice_alias(inode, dentry);
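/*
 * Readahead for readdir: start async reads of the tree blocks to the
 * right of the current slot in the parent node that still belong to this
 * directory's objectid.
 */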
648 static void reada_leaves(struct btrfs_root *root, struct btrfs_path *path,
651 struct btrfs_node *node;
661 node = btrfs_buffer_node(path->nodes[1]);
662 slot = path->slots[1];
663 nritems = btrfs_header_nritems(&node->header);
664 for (i = slot + 1; i < nritems; i++) {
665 item_objectid = btrfs_disk_key_objectid(&node->ptrs[i].key);
666 if (item_objectid != objectid)
668 blocknr = btrfs_node_blockptr(node, i);
669 ret = readahead_tree_block(root, blocknr);
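/*
 * readdir: iterate the DIR_INDEX items (DIR_ITEM keys for the tree root)
 * of this directory starting at f_pos, feeding each name in the packed
 * dir items to filldir().
 */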
675 static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
677 struct inode *inode = filp->f_path.dentry->d_inode;
678 struct btrfs_root *root = BTRFS_I(inode)->root;
679 struct btrfs_item *item;
680 struct btrfs_dir_item *di;
681 struct btrfs_key key;
682 struct btrfs_path *path;
685 struct btrfs_leaf *leaf;
688 unsigned char d_type = DT_UNKNOWN;
693 int key_type = BTRFS_DIR_INDEX_KEY;
695 /* FIXME, use a real flag for deciding about the key type */
696 if (root->fs_info->tree_root == root)
697 key_type = BTRFS_DIR_ITEM_KEY;
698 mutex_lock(&root->fs_info->fs_mutex);
699 key.objectid = inode->i_ino;
701 btrfs_set_key_type(&key, key_type);
702 key.offset = filp->f_pos;
703 path = btrfs_alloc_path();
704 btrfs_init_path(path);
705 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
709 reada_leaves(root, path, inode->i_ino);
711 leaf = btrfs_buffer_leaf(path->nodes[0]);
712 nritems = btrfs_header_nritems(&leaf->header);
713 slot = path->slots[0];
714 if (advance || slot >= nritems) {
715 if (slot >= nritems - 1) {
716 reada_leaves(root, path, inode->i_ino);
717 ret = btrfs_next_leaf(root, path);
720 leaf = btrfs_buffer_leaf(path->nodes[0]);
721 nritems = btrfs_header_nritems(&leaf->header);
722 slot = path->slots[0];
729 item = leaf->items + slot;
730 if (btrfs_disk_key_objectid(&item->key) != key.objectid)
732 if (btrfs_disk_key_type(&item->key) != key_type)
734 if (btrfs_disk_key_offset(&item->key) < filp->f_pos)
736 filp->f_pos = btrfs_disk_key_offset(&item->key);
738 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
740 di_total = btrfs_item_size(leaf->items + slot);
741 while (di_cur < di_total) {
742 over = filldir(dirent, (const char *)(di + 1),
743 btrfs_dir_name_len(di),
744 btrfs_disk_key_offset(&item->key),
745 btrfs_disk_key_objectid(&di->location),
749 di_len = btrfs_dir_name_len(di) + sizeof(*di);
751 di = (struct btrfs_dir_item *)((char *)di + di_len);
758 btrfs_release_path(root, path);
759 btrfs_free_path(path);
760 mutex_unlock(&root->fs_info->fs_mutex);
764 static void btrfs_put_super(struct super_block *sb)
766 struct btrfs_root *root = btrfs_sb(sb);
769 ret = close_ctree(root);
771 printk("close ctree returns %d\n", ret);
773 sb->s_fs_info = NULL;
776 static int btrfs_fill_super(struct super_block *sb, void *data, int silent)
778 struct inode *inode;
779 struct dentry *root_dentry;
780 struct btrfs_super_block *disk_super;
781 struct btrfs_root *tree_root;
782 struct btrfs_inode *bi;
784 sb->s_maxbytes = MAX_LFS_FILESIZE;
785 sb->s_magic = BTRFS_SUPER_MAGIC;
786 sb->s_op = &btrfs_super_ops;
789 tree_root = open_ctree(sb);
792 printk("btrfs: open_ctree failed\n");
795 sb->s_fs_info = tree_root;
796 disk_super = tree_root->fs_info->disk_super;
797 printk("read in super total blocks %Lu root %Lu\n",
798 btrfs_super_total_blocks(disk_super),
799 btrfs_super_root_dir(disk_super));
801 inode = btrfs_iget_locked(sb, btrfs_super_root_dir(disk_super),
804 bi->location.objectid = inode->i_ino;
805 bi->location.offset = 0;
806 bi->location.flags = 0;
807 bi->root = tree_root;
808 btrfs_set_key_type(&bi->location, BTRFS_INODE_ITEM_KEY);
812 if (inode->i_state & I_NEW) {
813 btrfs_read_locked_inode(inode);
814 unlock_new_inode(inode);
817 root_dentry = d_alloc_root(inode);
822 sb->s_root = root_dentry;
827 static int btrfs_write_inode(struct inode *inode, int wait)
829 struct btrfs_root *root = BTRFS_I(inode)->root;
830 struct btrfs_trans_handle *trans;
834 mutex_lock(&root->fs_info->fs_mutex);
835 trans = btrfs_start_transaction(root, 1);
836 btrfs_set_trans_block_group(trans, inode);
837 ret = btrfs_commit_transaction(trans, root);
838 mutex_unlock(&root->fs_info->fs_mutex);
843 static void btrfs_dirty_inode(struct inode *inode)
845 struct btrfs_root *root = BTRFS_I(inode)->root;
846 struct btrfs_trans_handle *trans;
848 mutex_lock(&root->fs_info->fs_mutex);
849 trans = btrfs_start_transaction(root, 1);
850 btrfs_set_trans_block_group(trans, inode);
851 btrfs_update_inode(trans, root, inode);
852 btrfs_end_transaction(trans, root);
853 mutex_unlock(&root->fs_info->fs_mutex);
854 btrfs_btree_balance_dirty(root);
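/*
 * Allocate and initialise a new in-memory inode, pick a block group near
 * the supplied hint, and insert the matching inode item into the tree.
 */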
857 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
858 struct btrfs_root *root,
860 struct btrfs_block_group_cache *group,
864 struct btrfs_inode_item inode_item;
865 struct btrfs_key *location;
869 inode = new_inode(root->fs_info->sb);
871 return ERR_PTR(-ENOMEM);
873 BTRFS_I(inode)->root = root;
878 group = btrfs_find_block_group(root, group, 0, 0, owner);
879 BTRFS_I(inode)->block_group = group;
881 inode->i_uid = current->fsuid;
882 inode->i_gid = current->fsgid;
883 inode->i_mode = mode;
884 inode->i_ino = objectid;
886 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
887 fill_inode_item(&inode_item, inode);
888 location = &BTRFS_I(inode)->location;
889 location->objectid = objectid;
891 location->offset = 0;
892 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
894 ret = btrfs_insert_inode(trans, root, objectid, &inode_item);
897 insert_inode_hash(inode);
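/*
 * Add a directory entry pointing at an existing inode; the name length is
 * added to the parent's i_size twice, matching the dir item + dir index
 * pair that stores it.
 */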
901 static int btrfs_add_link(struct btrfs_trans_handle *trans,
902 struct dentry *dentry, struct inode *inode)
905 struct btrfs_key key;
906 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
907 key.objectid = inode->i_ino;
909 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
912 ret = btrfs_insert_dir_item(trans, root,
913 dentry->d_name.name, dentry->d_name.len,
914 dentry->d_parent->d_inode->i_ino,
917 dentry->d_parent->d_inode->i_size += dentry->d_name.len * 2;
918 ret = btrfs_update_inode(trans, root,
919 dentry->d_parent->d_inode);
924 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
925 struct dentry *dentry, struct inode *inode)
927 int err = btrfs_add_link(trans, dentry, inode);
929 d_instantiate(dentry, inode);
937 static int btrfs_create(struct inode *dir, struct dentry *dentry,
938 int mode, struct nameidata *nd)
940 struct btrfs_trans_handle *trans;
941 struct btrfs_root *root = BTRFS_I(dir)->root;
947 mutex_lock(&root->fs_info->fs_mutex);
948 trans = btrfs_start_transaction(root, 1);
949 btrfs_set_trans_block_group(trans, dir);
951 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
957 inode = btrfs_new_inode(trans, root, objectid,
958 BTRFS_I(dir)->block_group, mode);
959 err = PTR_ERR(inode);
963 btrfs_set_trans_block_group(trans, inode);
964 err = btrfs_add_nondir(trans, dentry, inode);
968 inode->i_mapping->a_ops = &btrfs_aops;
969 inode->i_fop = &btrfs_file_operations;
970 inode->i_op = &btrfs_file_inode_operations;
972 dir->i_sb->s_dirt = 1;
973 btrfs_update_inode_block_group(trans, inode);
974 btrfs_update_inode_block_group(trans, dir);
976 btrfs_end_transaction(trans, root);
977 mutex_unlock(&root->fs_info->fs_mutex);
980 inode_dec_link_count(inode);
983 btrfs_btree_balance_dirty(root);
987 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
988 struct dentry *dentry)
990 struct btrfs_trans_handle *trans;
991 struct btrfs_root *root = BTRFS_I(dir)->root;
992 struct inode *inode = old_dentry->d_inode;
996 if (inode->i_nlink == 0)
1000 mutex_lock(&root->fs_info->fs_mutex);
1001 trans = btrfs_start_transaction(root, 1);
1002 btrfs_set_trans_block_group(trans, dir);
1003 atomic_inc(&inode->i_count);
1004 err = btrfs_add_nondir(trans, dentry, inode);
1007 dir->i_sb->s_dirt = 1;
1008 btrfs_update_inode_block_group(trans, dir);
1009 btrfs_update_inode(trans, root, inode);
1011 btrfs_end_transaction(trans, root);
1012 mutex_unlock(&root->fs_info->fs_mutex);
1015 inode_dec_link_count(inode);
1018 btrfs_btree_balance_dirty(root);
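/* Insert the "." and ".." entries for a freshly created directory. */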
1022 static int btrfs_make_empty_dir(struct btrfs_trans_handle *trans,
1023 struct btrfs_root *root,
1024 u64 objectid, u64 dirid)
1028 struct btrfs_key key;
1033 key.objectid = objectid;
1036 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1038 ret = btrfs_insert_dir_item(trans, root, buf, 1, objectid,
1042 key.objectid = dirid;
1043 ret = btrfs_insert_dir_item(trans, root, buf, 2, objectid,
1051 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1053 struct inode *inode;
1054 struct btrfs_trans_handle *trans;
1055 struct btrfs_root *root = BTRFS_I(dir)->root;
1057 int drop_on_err = 0;
1060 mutex_lock(&root->fs_info->fs_mutex);
1061 trans = btrfs_start_transaction(root, 1);
1062 btrfs_set_trans_block_group(trans, dir);
1063 if (IS_ERR(trans)) {
1064 err = PTR_ERR(trans);
1068 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1074 inode = btrfs_new_inode(trans, root, objectid,
1075 BTRFS_I(dir)->block_group, S_IFDIR | mode);
1076 if (IS_ERR(inode)) {
1077 err = PTR_ERR(inode);
1081 inode->i_op = &btrfs_dir_inode_operations;
1082 inode->i_fop = &btrfs_dir_file_operations;
1083 btrfs_set_trans_block_group(trans, inode);
1085 err = btrfs_make_empty_dir(trans, root, inode->i_ino, dir->i_ino);
1090 err = btrfs_update_inode(trans, root, inode);
1093 err = btrfs_add_link(trans, dentry, inode);
1096 d_instantiate(dentry, inode);
1098 dir->i_sb->s_dirt = 1;
1099 btrfs_update_inode_block_group(trans, inode);
1100 btrfs_update_inode_block_group(trans, dir);
1103 btrfs_end_transaction(trans, root);
1105 mutex_unlock(&root->fs_info->fs_mutex);
1108 btrfs_btree_balance_dirty(root);
1112 static int btrfs_sync_file(struct file *file,
1113 struct dentry *dentry, int datasync)
1115 struct inode *inode = dentry->d_inode;
1116 struct btrfs_root *root = BTRFS_I(inode)->root;
1118 struct btrfs_trans_handle *trans;
1120 mutex_lock(&root->fs_info->fs_mutex);
1121 trans = btrfs_start_transaction(root, 1);
1126 ret = btrfs_commit_transaction(trans, root);
1127 mutex_unlock(&root->fs_info->fs_mutex);
1129 return ret > 0 ? -EIO : ret;
1132 static int btrfs_sync_fs(struct super_block *sb, int wait)
1134 struct btrfs_trans_handle *trans;
1135 struct btrfs_root *root;
1137 root = btrfs_sb(sb);
1141 filemap_flush(root->fs_info->btree_inode->i_mapping);
1144 mutex_lock(&root->fs_info->fs_mutex);
1145 trans = btrfs_start_transaction(root, 1);
1146 ret = btrfs_commit_transaction(trans, root);
1149 printk("btrfs sync_fs\n");
1150 mutex_unlock(&root->fs_info->fs_mutex);
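/*
 * get_block with fs_mutex already held: look up the file extent covering
 * iblock.  Regular extents are mapped straight to their disk blocks;
 * inline extents are copied into the page here and the buffer is mapped
 * to block 0 as a marker.
 */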
1154 static int btrfs_get_block_lock(struct inode *inode, sector_t iblock,
1155 struct buffer_head *result, int create)
1160 u64 extent_start = 0;
1162 u64 objectid = inode->i_ino;
1164 struct btrfs_path *path;
1165 struct btrfs_root *root = BTRFS_I(inode)->root;
1166 struct btrfs_file_extent_item *item;
1167 struct btrfs_leaf *leaf;
1168 struct btrfs_disk_key *found_key;
1170 path = btrfs_alloc_path();
1172 btrfs_init_path(path);
1177 ret = btrfs_lookup_file_extent(NULL, root, path,
1179 iblock << inode->i_blkbits, 0);
1186 if (path->slots[0] == 0) {
1187 btrfs_release_path(root, path);
1193 item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
1194 struct btrfs_file_extent_item);
1195 leaf = btrfs_buffer_leaf(path->nodes[0]);
1196 blocknr = btrfs_file_extent_disk_blocknr(item);
1197 blocknr += btrfs_file_extent_offset(item);
1199 /* are we inside the extent that was found? */
1200 found_key = &leaf->items[path->slots[0]].key;
1201 found_type = btrfs_disk_key_type(found_key);
1202 if (btrfs_disk_key_objectid(found_key) != objectid ||
1203 found_type != BTRFS_EXTENT_DATA_KEY) {
1208 found_type = btrfs_file_extent_type(item);
1209 extent_start = btrfs_disk_key_offset(&leaf->items[path->slots[0]].key);
1210 if (found_type == BTRFS_FILE_EXTENT_REG) {
1211 extent_start = extent_start >> inode->i_blkbits;
1212 extent_end = extent_start + btrfs_file_extent_num_blocks(item);
1216 if (iblock >= extent_start && iblock < extent_end) {
1217 btrfs_map_bh_to_logical(root, result, blocknr +
1218 iblock - extent_start);
1221 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
1225 size = btrfs_file_extent_inline_len(leaf->items +
1227 extent_end = (extent_start + size) >> inode->i_blkbits;
1228 extent_start >>= inode->i_blkbits;
1229 if (iblock < extent_start || iblock > extent_end) {
1232 ptr = btrfs_file_extent_inline_start(item);
1233 map = kmap(result->b_page);
1234 memcpy(map, ptr, size);
1235 memset(map + size, 0, PAGE_CACHE_SIZE - size);
1236 flush_dcache_page(result->b_page);
1237 kunmap(result->b_page);
1238 set_buffer_uptodate(result);
1239 SetPageChecked(result->b_page);
1240 btrfs_map_bh_to_logical(root, result, 0);
1243 btrfs_free_path(path);
1247 static int btrfs_get_block(struct inode *inode, sector_t iblock,
1248 struct buffer_head *result, int create)
1251 struct btrfs_root *root = BTRFS_I(inode)->root;
1252 mutex_lock(&root->fs_info->fs_mutex);
1253 err = btrfs_get_block_lock(inode, iblock, result, create);
1254 mutex_unlock(&root->fs_info->fs_mutex);
1258 static int btrfs_prepare_write(struct file *file, struct page *page,
1259 unsigned from, unsigned to)
1261 return nobh_prepare_write(page, from, to, btrfs_get_block);
1264 static void btrfs_write_super(struct super_block *sb)
1266 btrfs_sync_fs(sb, 1);
1269 static int btrfs_readpage(struct file *file, struct page *page)
1271 return mpage_readpage(page, btrfs_get_block);
1275 * While block_write_full_page is writing back the dirty buffers under
1276 * the page lock, whoever dirtied the buffers may decide to clean them
1277 * again at any time. We handle that by only looking at the buffer
1278 * state inside lock_buffer().
1280 * If block_write_full_page() is called for regular writeback
1281 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1282 * locked buffer. This only can happen if someone has written the buffer
1283 * directly, with submit_bh(). At the address_space level PageWriteback
1284 * prevents this contention from occurring.
1286 static int __btrfs_write_full_page(struct inode *inode, struct page *page,
1287 struct writeback_control *wbc)
1291 sector_t last_block;
1292 struct buffer_head *bh, *head;
1293 const unsigned blocksize = 1 << inode->i_blkbits;
1294 int nr_underway = 0;
1296 BUG_ON(!PageLocked(page));
1298 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1300 if (!page_has_buffers(page)) {
1301 create_empty_buffers(page, blocksize,
1302 (1 << BH_Dirty)|(1 << BH_Uptodate));
1306 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1307 * here, and the (potentially unmapped) buffers may become dirty at
1308 * any time. If a buffer becomes dirty here after we've inspected it
1309 * then we just miss that fact, and the page stays dirty.
1311 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1312 * handle that here by just cleaning them.
1315 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1316 head = page_buffers(page);
1320 * Get all the dirty buffers mapped to disk addresses and
1321 * handle any aliases from the underlying blockdev's mapping.
1324 if (block > last_block) {
1326 * mapped buffers outside i_size will occur, because
1327 * this page can be outside i_size when there is a
1328 * truncate in progress.
1331 * The buffer was zeroed by block_write_full_page()
1333 clear_buffer_dirty(bh);
1334 set_buffer_uptodate(bh);
1335 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1336 WARN_ON(bh->b_size != blocksize);
1337 err = btrfs_get_block(inode, block, bh, 0);
1339 printk("writepage going to recovery err %d\n", err);
1342 if (buffer_new(bh)) {
1343 /* blockdev mappings never come here */
1344 clear_buffer_new(bh);
1347 bh = bh->b_this_page;
1349 } while (bh != head);
1352 if (!buffer_mapped(bh))
1355 * If it's a fully non-blocking write attempt and we cannot
1356 * lock the buffer then redirty the page. Note that this can
1357 * potentially cause a busy-wait loop from pdflush and kswapd
1358 * activity, but those code paths have their own higher-level
1361 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1363 } else if (test_set_buffer_locked(bh)) {
1364 redirty_page_for_writepage(wbc, page);
1367 if (test_clear_buffer_dirty(bh) && bh->b_blocknr != 0) {
1368 mark_buffer_async_write(bh);
1372 } while ((bh = bh->b_this_page) != head);
1375 * The page and its buffers are protected by PageWriteback(), so we can
1376 * drop the bh refcounts early.
1378 BUG_ON(PageWriteback(page));
1379 set_page_writeback(page);
1382 struct buffer_head *next = bh->b_this_page;
1383 if (buffer_async_write(bh)) {
1384 submit_bh(WRITE, bh);
1388 } while (bh != head);
1393 if (nr_underway == 0) {
1395 * The page was marked dirty, but the buffers were
1396 * clean. Someone wrote them back by hand with
1397 * ll_rw_block/submit_bh. A rare case.
1401 if (!buffer_uptodate(bh)) {
1405 bh = bh->b_this_page;
1406 } while (bh != head);
1408 SetPageUptodate(page);
1409 end_page_writeback(page);
1415 * ENOSPC, or some other error. We may already have added some
1416 * blocks to the file, so we need to write these out to avoid
1417 * exposing stale data.
1418 * The page is currently locked and not marked for writeback
1421 /* Recovery: lock and submit the mapped buffers */
1423 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1425 mark_buffer_async_write(bh);
1428 * The buffer may have been set dirty during
1429 * attachment to a dirty page.
1431 clear_buffer_dirty(bh);
1433 } while ((bh = bh->b_this_page) != head);
1435 BUG_ON(PageWriteback(page));
1436 set_page_writeback(page);
1438 struct buffer_head *next = bh->b_this_page;
1439 if (buffer_async_write(bh)) {
1440 clear_buffer_dirty(bh);
1441 submit_bh(WRITE, bh);
1445 } while (bh != head);
1451 * The generic ->writepage function for buffer-backed address_spaces
1453 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
1455 struct inode * const inode = page->mapping->host;
1456 loff_t i_size = i_size_read(inode);
1457 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
1461 /* Is the page fully inside i_size? */
1462 if (page->index < end_index)
1463 return __btrfs_write_full_page(inode, page, wbc);
1465 /* Is the page fully outside i_size? (truncate in progress) */
1466 offset = i_size & (PAGE_CACHE_SIZE - 1);
1467 if (page->index >= end_index+1 || !offset) {
1469 * The page may have dirty, unmapped buffers. For example,
1470 * they may have been added in ext3_writepage(). Make them
1471 * freeable here, so the page does not leak.
1473 block_invalidatepage(page, 0);
1475 return 0; /* don't care */
1479 * The page straddles i_size. It must be zeroed out on each and every
1480 * writepage invocation because it may be mmapped. "A file is mapped
1481 * in multiples of the page size. For a file that is not a multiple of
1482 * the page size, the remaining memory is zeroed when mapped, and
1483 * writes to that region are not written out to the file."
1485 kaddr = kmap_atomic(page, KM_USER0);
1486 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
1487 flush_dcache_page(page);
1488 kunmap_atomic(kaddr, KM_USER0);
1489 return __btrfs_write_full_page(inode, page, wbc);
1492 static void btrfs_truncate(struct inode *inode)
1494 struct btrfs_root *root = BTRFS_I(inode)->root;
1496 struct btrfs_trans_handle *trans;
1498 if (!S_ISREG(inode->i_mode))
1500 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1503 nobh_truncate_page(inode->i_mapping, inode->i_size);
1505 /* FIXME, add redo link to tree so we don't leak on crash */
1506 mutex_lock(&root->fs_info->fs_mutex);
1507 trans = btrfs_start_transaction(root, 1);
1508 btrfs_set_trans_block_group(trans, inode);
1509 ret = btrfs_truncate_in_trans(trans, root, inode);
1511 btrfs_update_inode(trans, root, inode);
1512 ret = btrfs_end_transaction(trans, root);
1514 mutex_unlock(&root->fs_info->fs_mutex);
1515 btrfs_btree_balance_dirty(root);
1519 * Make sure any changes to nobh_commit_write() are reflected in
1520 * nobh_truncate_page(), since it doesn't call commit_write().
1522 static int btrfs_commit_write(struct file *file, struct page *page,
1523 unsigned from, unsigned to)
1525 struct inode *inode = page->mapping->host;
1526 struct buffer_head *bh;
1527 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1529 SetPageUptodate(page);
1530 bh = page_buffers(page);
1531 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
1532 set_page_dirty(page);
1534 if (pos > inode->i_size) {
1535 i_size_write(inode, pos);
1536 mark_inode_dirty(inode);
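/*
 * Copy write() data from userspace into a run of already prepared and
 * locked pages; returns -EFAULT if any copy faults.
 */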
1541 static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
1542 struct page **prepared_pages,
1543 const char __user * buf)
1545 long page_fault = 0;
1547 int offset = pos & (PAGE_CACHE_SIZE - 1);
1549 for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
1550 size_t count = min_t(size_t,
1551 PAGE_CACHE_SIZE - offset, write_bytes);
1552 struct page *page = prepared_pages[i];
1553 fault_in_pages_readable(buf, count);
1555 /* Copy data from userspace to the current page */
1557 page_fault = __copy_from_user(page_address(page) + offset,
1559 /* Flush processor's dcache for this page */
1560 flush_dcache_page(page);
1563 write_bytes -= count;
1568 return page_fault ? -EFAULT : 0;
1571 static void btrfs_drop_pages(struct page **pages, size_t num_pages)
1574 for (i = 0; i < num_pages; i++) {
1577 unlock_page(pages[i]);
1578 mark_page_accessed(pages[i]);
1579 page_cache_release(pages[i]);
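/*
 * Finish a write into prepared pages, one short transaction per page: a
 * buffer mapped to block 0 means the data is small enough to be stored in
 * an inline file extent item, otherwise the page is checksummed; either
 * way the page is then pushed out through btrfs_commit_write().
 */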
1582 static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
1583 struct btrfs_root *root,
1585 struct page **pages,
1595 struct inode *inode = file->f_path.dentry->d_inode;
1596 struct buffer_head *bh;
1597 struct btrfs_file_extent_item *ei;
1599 for (i = 0; i < num_pages; i++) {
1600 offset = pos & (PAGE_CACHE_SIZE - 1);
1601 this_write = min(PAGE_CACHE_SIZE - offset, write_bytes);
1602 /* FIXME, one block at a time */
1604 mutex_lock(&root->fs_info->fs_mutex);
1605 trans = btrfs_start_transaction(root, 1);
1606 btrfs_set_trans_block_group(trans, inode);
1608 bh = page_buffers(pages[i]);
1609 if (buffer_mapped(bh) && bh->b_blocknr == 0) {
1610 struct btrfs_key key;
1611 struct btrfs_path *path;
1615 path = btrfs_alloc_path();
1617 key.objectid = inode->i_ino;
1618 key.offset = (u64)pages[i]->index << PAGE_CACHE_SHIFT;
1620 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
1621 BUG_ON(write_bytes >= PAGE_CACHE_SIZE);
1623 btrfs_file_extent_calc_inline_size(write_bytes);
1624 ret = btrfs_insert_empty_item(trans, root, path, &key,
1627 ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
1628 path->slots[0], struct btrfs_file_extent_item);
1629 btrfs_set_file_extent_generation(ei, trans->transid);
1630 btrfs_set_file_extent_type(ei,
1631 BTRFS_FILE_EXTENT_INLINE);
1632 ptr = btrfs_file_extent_inline_start(ei);
1633 btrfs_memcpy(root, path->nodes[0]->b_data,
1634 ptr, bh->b_data, offset + write_bytes);
1635 mark_buffer_dirty(path->nodes[0]);
1636 btrfs_free_path(path);
1637 } else if (buffer_mapped(bh)) {
1638 btrfs_csum_file_block(trans, root, inode->i_ino,
1639 pages[i]->index << PAGE_CACHE_SHIFT,
1640 kmap(pages[i]), PAGE_CACHE_SIZE);
1643 SetPageChecked(pages[i]);
1644 /* btrfs_update_inode_block_group(trans, inode); */
1645 ret = btrfs_end_transaction(trans, root);
1647 mutex_unlock(&root->fs_info->fs_mutex);
1649 ret = btrfs_commit_write(file, pages[i], offset,
1650 offset + this_write);
1656 WARN_ON(this_write > write_bytes);
1657 write_bytes -= this_write;
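/*
 * Drop or trim any file extents overlapping [start, end) before an
 * overwrite: extents fully inside the range are deleted and their disk
 * blocks freed, an extent straddling start is shortened, and an extent
 * straddling end gets a new "bookend" extent item inserted for its tail.
 */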
1663 static int drop_extents(struct btrfs_trans_handle *trans,
1664 struct btrfs_root *root,
1665 struct inode *inode,
1666 u64 start, u64 end, u64 *hint_block)
1669 struct btrfs_key key;
1670 struct btrfs_leaf *leaf;
1672 struct btrfs_file_extent_item *extent;
1675 struct btrfs_file_extent_item old;
1676 struct btrfs_path *path;
1677 u64 search_start = start;
1683 path = btrfs_alloc_path();
1687 btrfs_release_path(root, path);
1688 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
1693 if (path->slots[0] == 0) {
1704 leaf = btrfs_buffer_leaf(path->nodes[0]);
1705 slot = path->slots[0];
1706 btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
1707 if (key.offset >= end || key.objectid != inode->i_ino) {
1711 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) {
1715 extent = btrfs_item_ptr(leaf, slot,
1716 struct btrfs_file_extent_item);
1717 found_type = btrfs_file_extent_type(extent);
1718 if (found_type == BTRFS_FILE_EXTENT_REG) {
1719 extent_end = key.offset +
1720 (btrfs_file_extent_num_blocks(extent) <<
1723 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
1725 extent_end = key.offset +
1726 btrfs_file_extent_inline_len(leaf->items + slot);
1729 if (!found_extent && !found_inline) {
1734 if (search_start >= extent_end) {
1740 u64 mask = root->blocksize - 1;
1741 search_start = (extent_end + mask) & ~mask;
1743 search_start = extent_end;
1745 if (end < extent_end && end >= key.offset) {
1748 btrfs_file_extent_disk_blocknr(extent);
1749 u64 disk_num_blocks =
1750 btrfs_file_extent_disk_num_blocks(extent);
1751 memcpy(&old, extent, sizeof(old));
1752 if (disk_blocknr != 0) {
1753 ret = btrfs_inc_extent_ref(trans, root,
1754 disk_blocknr, disk_num_blocks);
1758 WARN_ON(found_inline);
1762 if (start > key.offset) {
1765 /* truncate existing extent */
1767 WARN_ON(start & (root->blocksize - 1));
1769 new_num = (start - key.offset) >>
1771 old_num = btrfs_file_extent_num_blocks(extent);
1773 btrfs_file_extent_disk_blocknr(extent);
1774 if (btrfs_file_extent_disk_blocknr(extent)) {
1776 (old_num - new_num) << 3;
1778 btrfs_set_file_extent_num_blocks(extent,
1780 mark_buffer_dirty(path->nodes[0]);
1786 u64 disk_blocknr = 0;
1787 u64 disk_num_blocks = 0;
1788 u64 extent_num_blocks = 0;
1791 btrfs_file_extent_disk_blocknr(extent);
1793 btrfs_file_extent_disk_num_blocks(extent);
1795 btrfs_file_extent_num_blocks(extent);
1797 btrfs_file_extent_disk_blocknr(extent);
1799 ret = btrfs_del_item(trans, root, path);
1801 btrfs_release_path(root, path);
1803 if (found_extent && disk_blocknr != 0) {
1804 inode->i_blocks -= extent_num_blocks << 3;
1805 ret = btrfs_free_extent(trans, root,
1807 disk_num_blocks, 0);
1811 if (!bookend && search_start >= end) {
1818 if (bookend && found_extent) {
1819 /* create bookend */
1820 struct btrfs_key ins;
1821 ins.objectid = inode->i_ino;
1824 btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
1826 btrfs_release_path(root, path);
1827 ret = btrfs_insert_empty_item(trans, root, path, &ins,
1830 extent = btrfs_item_ptr(
1831 btrfs_buffer_leaf(path->nodes[0]),
1833 struct btrfs_file_extent_item);
1834 btrfs_set_file_extent_disk_blocknr(extent,
1835 btrfs_file_extent_disk_blocknr(&old));
1836 btrfs_set_file_extent_disk_num_blocks(extent,
1837 btrfs_file_extent_disk_num_blocks(&old));
1839 btrfs_set_file_extent_offset(extent,
1840 btrfs_file_extent_offset(&old) +
1841 ((end - key.offset) >> inode->i_blkbits));
1842 WARN_ON(btrfs_file_extent_num_blocks(&old) <
1843 (extent_end - end) >> inode->i_blkbits);
1844 btrfs_set_file_extent_num_blocks(extent,
1845 (extent_end - end) >> inode->i_blkbits);
1847 btrfs_set_file_extent_type(extent,
1848 BTRFS_FILE_EXTENT_REG);
1849 btrfs_set_file_extent_generation(extent,
1850 btrfs_file_extent_generation(&old));
1851 btrfs_mark_buffer_dirty(path->nodes[0]);
1852 if (btrfs_file_extent_disk_blocknr(&old) != 0) {
1854 btrfs_file_extent_num_blocks(extent) << 3;
1861 btrfs_free_path(path);
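/*
 * Grab and prepare the page cache pages for a write: create uptodate
 * buffers and map them to consecutive blocks of the freshly allocated
 * extent starting at alloc_extent_start.
 */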
1865 static int prepare_pages(struct btrfs_root *root,
1867 struct page **pages,
1870 unsigned long first_index,
1871 unsigned long last_index,
1873 u64 alloc_extent_start)
1876 unsigned long index = pos >> PAGE_CACHE_SHIFT;
1877 struct inode *inode = file->f_path.dentry->d_inode;
1881 struct buffer_head *bh;
1882 struct buffer_head *head;
1883 loff_t isize = i_size_read(inode);
1885 memset(pages, 0, num_pages * sizeof(struct page *));
1887 for (i = 0; i < num_pages; i++) {
1888 pages[i] = grab_cache_page(inode->i_mapping, index + i);
1891 goto failed_release;
1893 cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
1894 wait_on_page_writeback(pages[i]);
1895 offset = pos & (PAGE_CACHE_SIZE - 1);
1896 this_write = min(PAGE_CACHE_SIZE - offset, write_bytes);
1897 if (!page_has_buffers(pages[i])) {
1898 create_empty_buffers(pages[i],
1899 root->fs_info->sb->s_blocksize,
1900 (1 << BH_Uptodate));
1902 head = page_buffers(pages[i]);
1905 err = btrfs_map_bh_to_logical(root, bh,
1906 alloc_extent_start);
1909 goto failed_truncate;
1910 bh = bh->b_this_page;
1911 if (alloc_extent_start)
1912 alloc_extent_start++;
1913 } while (bh != head);
1915 WARN_ON(this_write > write_bytes);
1916 write_bytes -= this_write;
1921 btrfs_drop_pages(pages, num_pages);
1925 btrfs_drop_pages(pages, num_pages);
1927 vmtruncate(inode, isize);
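/*
 * write(): pin any partially overwritten first/last pages, drop the old
 * extents in the affected range, allocate one new extent for larger
 * writes (small writes are later stored inline by
 * dirty_and_release_pages()), then copy the user data into prepared
 * pages a chunk at a time and push them out.
 */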
1931 static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1932 size_t count, loff_t *ppos)
1935 size_t num_written = 0;
1938 struct inode *inode = file->f_path.dentry->d_inode;
1939 struct btrfs_root *root = BTRFS_I(inode)->root;
1940 struct page *pages[8];
1941 struct page *pinned[2];
1942 unsigned long first_index;
1943 unsigned long last_index;
1946 u64 alloc_extent_start;
1948 struct btrfs_trans_handle *trans;
1949 struct btrfs_key ins;
1952 if (file->f_flags & O_DIRECT)
1955 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1956 current->backing_dev_info = inode->i_mapping->backing_dev_info;
1957 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1962 err = remove_suid(file->f_path.dentry);
1965 file_update_time(file);
1967 start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
1968 num_blocks = (count + pos - start_pos + root->blocksize - 1) >>
1971 mutex_lock(&inode->i_mutex);
1972 first_index = pos >> PAGE_CACHE_SHIFT;
1973 last_index = (pos + count) >> PAGE_CACHE_SHIFT;
1975 if ((first_index << PAGE_CACHE_SHIFT) < inode->i_size &&
1976 (pos & (PAGE_CACHE_SIZE - 1))) {
1977 pinned[0] = grab_cache_page(inode->i_mapping, first_index);
1978 if (!PageUptodate(pinned[0])) {
1979 ret = mpage_readpage(pinned[0], btrfs_get_block);
1981 wait_on_page_locked(pinned[0]);
1983 unlock_page(pinned[0]);
1986 if (first_index != last_index &&
1987 (last_index << PAGE_CACHE_SHIFT) < inode->i_size &&
1988 pos + count < inode->i_size &&
1989 (count & (PAGE_CACHE_SIZE - 1))) {
1990 pinned[1] = grab_cache_page(inode->i_mapping, last_index);
1991 if (!PageUptodate(pinned[1])) {
1992 ret = mpage_readpage(pinned[1], btrfs_get_block);
1994 wait_on_page_locked(pinned[1]);
1996 unlock_page(pinned[1]);
2000 mutex_lock(&root->fs_info->fs_mutex);
2001 trans = btrfs_start_transaction(root, 1);
2004 mutex_unlock(&root->fs_info->fs_mutex);
2007 btrfs_set_trans_block_group(trans, inode);
2008 /* FIXME blocksize != 4096 */
2009 inode->i_blocks += num_blocks << 3;
2011 if (start_pos < inode->i_size) {
2012 /* FIXME blocksize != pagesize */
2013 ret = drop_extents(trans, root, inode,
2015 (pos + count + root->blocksize -1) &
2016 ~((u64)root->blocksize - 1), &hint_block);
2019 if (inode->i_size < start_pos) {
2020 u64 last_pos_in_file;
2022 u64 mask = root->blocksize - 1;
2023 last_pos_in_file = (inode->i_size + mask) & ~mask;
2024 hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
2025 hole_size >>= inode->i_blkbits;
2026 if (last_pos_in_file < start_pos) {
2027 ret = btrfs_insert_file_extent(trans, root,
2034 if (inode->i_size >= PAGE_CACHE_SIZE || pos + count < inode->i_size ||
2035 pos + count - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) {
2036 ret = btrfs_alloc_extent(trans, root, inode->i_ino,
2037 num_blocks, hint_block, (u64)-1,
2040 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
2041 start_pos, ins.objectid, ins.offset,
2049 alloc_extent_start = ins.objectid;
2050 /* btrfs_update_inode_block_group(trans, inode); */
2051 ret = btrfs_end_transaction(trans, root);
2052 mutex_unlock(&root->fs_info->fs_mutex);
2055 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
2056 size_t write_bytes = min(count, PAGE_CACHE_SIZE - offset);
2057 size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
2060 memset(pages, 0, sizeof(pages));
2061 ret = prepare_pages(root, file, pages, num_pages,
2062 pos, first_index, last_index,
2063 write_bytes, alloc_extent_start);
2066 /* FIXME blocks != pagesize */
2067 if (alloc_extent_start)
2068 alloc_extent_start += num_pages;
2069 ret = btrfs_copy_from_user(pos, num_pages,
2070 write_bytes, pages, buf);
2073 ret = dirty_and_release_pages(NULL, root, file, pages,
2074 num_pages, pos, write_bytes);
2076 btrfs_drop_pages(pages, num_pages);
2079 count -= write_bytes;
2081 num_written += write_bytes;
2083 balance_dirty_pages_ratelimited(inode->i_mapping);
2084 btrfs_btree_balance_dirty(root);
2088 mutex_unlock(&inode->i_mutex);
2091 page_cache_release(pinned[0]);
2093 page_cache_release(pinned[1]);
2095 current->backing_dev_info = NULL;
2096 mark_inode_dirty(inode);
2097 return num_written ? num_written : err;
2100 static int btrfs_read_actor(read_descriptor_t *desc, struct page *page,
2101 unsigned long offset, unsigned long size)
2104 unsigned long left, count = desc->count;
2105 struct inode *inode = page->mapping->host;
2110 if (!PageChecked(page)) {
2111 /* FIXME, do it per block */
2112 struct btrfs_root *root = BTRFS_I(inode)->root;
2114 int ret = btrfs_csum_verify_file_block(root,
2115 page->mapping->host->i_ino,
2116 page->index << PAGE_CACHE_SHIFT,
2117 kmap(page), PAGE_CACHE_SIZE);
2119 if (ret != -ENOENT) {
2120 printk("failed to verify ino %lu page %lu\n",
2121 page->mapping->host->i_ino,
2123 memset(page_address(page), 0, PAGE_CACHE_SIZE);
2124 flush_dcache_page(page);
2127 SetPageChecked(page);
2131 * Faults on the destination of a read are common, so do it before
2134 if (!fault_in_pages_writeable(desc->arg.buf, size)) {
2135 kaddr = kmap_atomic(page, KM_USER0);
2136 left = __copy_to_user_inatomic(desc->arg.buf,
2137 kaddr + offset, size);
2138 kunmap_atomic(kaddr, KM_USER0);
2143 /* Do it the slow way */
2145 left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
2150 desc->error = -EFAULT;
2153 desc->count = count - size;
2154 desc->written += size;
2155 desc->arg.buf += size;
2160 * btrfs_file_aio_read - filesystem read routine
2161 * @iocb: kernel I/O control block
2162 * @iov: io vector request
2163 * @nr_segs: number of segments in the iovec
2164 * @pos: current file position
2166 static ssize_t btrfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
2167 unsigned long nr_segs, loff_t pos)
2169 struct file *filp = iocb->ki_filp;
2173 loff_t *ppos = &iocb->ki_pos;
2176 for (seg = 0; seg < nr_segs; seg++) {
2177 const struct iovec *iv = &iov[seg];
2180 * If any segment has a negative length, or the cumulative
2181 * length ever wraps negative then return -EINVAL.
2183 count += iv->iov_len;
2184 if (unlikely((ssize_t)(count|iv->iov_len) < 0))
2186 if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
2191 count -= iv->iov_len; /* This segment is no good */
2196 for (seg = 0; seg < nr_segs; seg++) {
2197 read_descriptor_t desc;
2200 desc.arg.buf = iov[seg].iov_base;
2201 desc.count = iov[seg].iov_len;
2202 if (desc.count == 0)
2205 do_generic_file_read(filp, ppos, &desc,
2207 retval += desc.written;
2209 retval = retval ?: desc.error;
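/*
 * Create a new empty subvolume: allocate a fresh leaf for its tree,
 * insert a root item and a directory entry in the tree of tree roots,
 * then create its root directory inode in a second transaction against
 * the new root.
 */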
2217 static int create_subvol(struct btrfs_root *root, char *name, int namelen)
2219 struct btrfs_trans_handle *trans;
2220 struct btrfs_key key;
2221 struct btrfs_root_item root_item;
2222 struct btrfs_inode_item *inode_item;
2223 struct buffer_head *subvol;
2224 struct btrfs_leaf *leaf;
2225 struct btrfs_root *new_root;
2226 struct inode *inode;
2230 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
2232 mutex_lock(&root->fs_info->fs_mutex);
2233 trans = btrfs_start_transaction(root, 1);
2236 subvol = btrfs_alloc_free_block(trans, root, 0);
2239 leaf = btrfs_buffer_leaf(subvol);
2240 btrfs_set_header_nritems(&leaf->header, 0);
2241 btrfs_set_header_level(&leaf->header, 0);
2242 btrfs_set_header_blocknr(&leaf->header, bh_blocknr(subvol));
2243 btrfs_set_header_generation(&leaf->header, trans->transid);
2244 btrfs_set_header_owner(&leaf->header, root->root_key.objectid);
2245 memcpy(leaf->header.fsid, root->fs_info->disk_super->fsid,
2246 sizeof(leaf->header.fsid));
2247 mark_buffer_dirty(subvol);
2249 inode_item = &root_item.inode;
2250 memset(inode_item, 0, sizeof(*inode_item));
2251 btrfs_set_inode_generation(inode_item, 1);
2252 btrfs_set_inode_size(inode_item, 3);
2253 btrfs_set_inode_nlink(inode_item, 1);
2254 btrfs_set_inode_nblocks(inode_item, 1);
2255 btrfs_set_inode_mode(inode_item, S_IFDIR | 0755);
2257 btrfs_set_root_blocknr(&root_item, bh_blocknr(subvol));
2258 btrfs_set_root_refs(&root_item, 1);
2262 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2266 btrfs_set_root_dirid(&root_item, new_dirid);
2268 key.objectid = objectid;
2271 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2272 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2277 * insert the directory item
2279 key.offset = (u64)-1;
2280 dir = root->fs_info->sb->s_root->d_inode;
2281 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2282 name, namelen, dir->i_ino, &key, 0);
2285 ret = btrfs_commit_transaction(trans, root);
2288 new_root = btrfs_read_fs_root(root->fs_info, &key);
2291 trans = btrfs_start_transaction(new_root, 1);
2294 inode = btrfs_new_inode(trans, new_root, new_dirid,
2295 BTRFS_I(dir)->block_group, S_IFDIR | 0700);
2296 inode->i_op = &btrfs_dir_inode_operations;
2297 inode->i_fop = &btrfs_dir_file_operations;
2299 ret = btrfs_make_empty_dir(trans, new_root, new_dirid, new_dirid);
2304 ret = btrfs_update_inode(trans, new_root, inode);
2307 ret = btrfs_commit_transaction(trans, new_root);
2312 mutex_unlock(&root->fs_info->fs_mutex);
2313 btrfs_btree_balance_dirty(root);
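/*
 * Snapshot the current root: copy its root item (pointing at the current
 * root block) under a new objectid, add a directory entry for the name,
 * and bump the root's reference count so shared blocks get COWed.
 */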
2317 static int create_snapshot(struct btrfs_root *root, char *name, int namelen)
2319 struct btrfs_trans_handle *trans;
2320 struct btrfs_key key;
2321 struct btrfs_root_item new_root_item;
2325 if (!root->ref_cows)
2328 mutex_lock(&root->fs_info->fs_mutex);
2329 trans = btrfs_start_transaction(root, 1);
2332 ret = btrfs_update_inode(trans, root, root->inode);
2335 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2339 memcpy(&new_root_item, &root->root_item,
2340 sizeof(new_root_item));
2342 key.objectid = objectid;
2345 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2346 btrfs_set_root_blocknr(&new_root_item, bh_blocknr(root->node));
2348 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2353 * insert the directory item
2355 key.offset = (u64)-1;
2356 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2358 root->fs_info->sb->s_root->d_inode->i_ino,
2363 ret = btrfs_inc_root_ref(trans, root);
2366 ret = btrfs_commit_transaction(trans, root);
2368 mutex_unlock(&root->fs_info->fs_mutex);
2369 btrfs_btree_balance_dirty(root);
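/*
 * Grow the filesystem onto another block device: place the new device's
 * blocks after the current total, record a device item for it, add it to
 * the device radix tree and extend the superblock's total block count.
 */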
2373 static int add_disk(struct btrfs_root *root, char *name, int namelen)
2375 struct block_device *bdev;
2376 struct btrfs_path *path;
2377 struct super_block *sb = root->fs_info->sb;
2378 struct btrfs_root *dev_root = root->fs_info->dev_root;
2379 struct btrfs_trans_handle *trans;
2380 struct btrfs_device_item *dev_item;
2381 struct btrfs_key key;
2388 printk("adding disk %s\n", name);
2389 path = btrfs_alloc_path();
2392 num_blocks = btrfs_super_total_blocks(root->fs_info->disk_super);
2393 bdev = open_bdev_excl(name, O_RDWR, sb);
2395 ret = PTR_ERR(bdev);
2396 printk("open bdev excl failed ret %d\n", ret);
2399 set_blocksize(bdev, sb->s_blocksize);
2400 new_blocks = bdev->bd_inode->i_size >> sb->s_blocksize_bits;
2401 key.objectid = num_blocks;
2402 key.offset = new_blocks;
2404 btrfs_set_key_type(&key, BTRFS_DEV_ITEM_KEY);
2406 mutex_lock(&dev_root->fs_info->fs_mutex);
2407 trans = btrfs_start_transaction(dev_root, 1);
2408 item_size = sizeof(*dev_item) + namelen;
2409 printk("insert empty on %Lu %Lu %u size %d\n", num_blocks, new_blocks, key.flags, item_size);
2410 ret = btrfs_insert_empty_item(trans, dev_root, path, &key, item_size);
2412 printk("insert failed %d\n", ret);
2413 close_bdev_excl(bdev);
2418 dev_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
2419 path->slots[0], struct btrfs_device_item);
2420 btrfs_set_device_pathlen(dev_item, namelen);
2421 memcpy(dev_item + 1, name, namelen);
2423 device_id = btrfs_super_last_device_id(root->fs_info->disk_super) + 1;
2424 btrfs_set_super_last_device_id(root->fs_info->disk_super, device_id);
2425 btrfs_set_device_id(dev_item, device_id);
2426 mark_buffer_dirty(path->nodes[0]);
2428 ret = btrfs_insert_dev_radix(root, bdev, device_id, num_blocks,
2432 btrfs_set_super_total_blocks(root->fs_info->disk_super,
2433 num_blocks + new_blocks);
2434 i_size_write(root->fs_info->btree_inode,
2435 (num_blocks + new_blocks) <<
2436 root->fs_info->btree_inode->i_blkbits);
2440 ret = btrfs_commit_transaction(trans, dev_root);
2442 mutex_unlock(&root->fs_info->fs_mutex);
2444 btrfs_free_path(path);
2445 btrfs_btree_balance_dirty(root);
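/*
 * ioctl entry point: BTRFS_IOC_SNAP_CREATE creates a subvolume (when
 * issued on the tree root) or a snapshot (on any other root), and
 * BTRFS_IOC_ADD_DISK grows the fs onto another device.
 *
 * Rough userspace sketch; it assumes the ioctl numbers and
 * struct btrfs_ioctl_vol_args (with its name[] field) come from a shared
 * header, and the exact header name and any extra fields are not shown:
 *
 *	struct btrfs_ioctl_vol_args args = { 0 };
 *	int fd = open("/mnt/btrfs", O_RDONLY);
 *	strncpy(args.name, "snap1", BTRFS_VOL_NAME_MAX);
 *	ioctl(fd, BTRFS_IOC_SNAP_CREATE, &args);
 */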
2450 static int btrfs_ioctl(struct inode *inode, struct file *filp, unsigned int
2451 cmd, unsigned long arg)
2453 struct btrfs_root *root = BTRFS_I(inode)->root;
2454 struct btrfs_ioctl_vol_args vol_args;
2456 struct btrfs_dir_item *di;
2458 struct btrfs_path *path;
2462 case BTRFS_IOC_SNAP_CREATE:
2463 if (copy_from_user(&vol_args,
2464 (struct btrfs_ioctl_vol_args __user *)arg,
2467 namelen = strlen(vol_args.name);
2468 if (namelen > BTRFS_VOL_NAME_MAX)
2470 path = btrfs_alloc_path();
2473 root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
2474 mutex_lock(&root->fs_info->fs_mutex);
2475 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
2477 vol_args.name, namelen, 0);
2478 mutex_unlock(&root->fs_info->fs_mutex);
2479 btrfs_free_path(path);
2480 if (di && !IS_ERR(di))
2483 if (root == root->fs_info->tree_root)
2484 ret = create_subvol(root, vol_args.name, namelen);
2486 ret = create_snapshot(root, vol_args.name, namelen);
2489 case BTRFS_IOC_ADD_DISK:
2490 if (copy_from_user(&vol_args,
2491 (struct btrfs_ioctl_vol_args __user *)arg,
2494 namelen = strlen(vol_args.name);
2495 if (namelen > BTRFS_VOL_NAME_MAX)
2497 vol_args.name[namelen] = '\0';
2498 ret = add_disk(root, vol_args.name, namelen);
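
/*
 * Slab caches for the objects btrfs allocates most often: in-memory
 * inodes, transaction handles, transactions, radix tree bits and paths.
 */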
static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;

/* Called inside transaction, so use GFP_NOFS */
static struct inode *btrfs_alloc_inode(struct super_block *sb)
{
        struct btrfs_inode *ei;

        ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void btrfs_destroy_inode(struct inode *inode)
{
        WARN_ON(!list_empty(&inode->i_dentry));
        WARN_ON(inode->i_data.nrpages);

        kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
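
/*
 * Slab constructor for the inode cache: the embedded VFS inode only needs
 * inode_init_once() when the slab object is first constructed, not on
 * every allocation, so it runs only for SLAB_CTOR_CONSTRUCTOR.
 */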
static void init_once(void *foo, struct kmem_cache *cachep,
                      unsigned long flags)
{
        struct btrfs_inode *ei = (struct btrfs_inode *)foo;

        if ((flags & (SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
                inode_init_once(&ei->vfs_inode);
        }
}
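
/*
 * Create the caches declared above.  Only the inode cache needs the
 * constructor; the radix cache is created SLAB_DESTROY_BY_RCU, presumably
 * so concurrent radix lookups can tolerate objects being freed under them.
 */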
static int init_inodecache(void)
{
        btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
                                             sizeof(struct btrfs_inode),
                                             0, (SLAB_RECLAIM_ACCOUNT|
                                                SLAB_MEM_SPREAD),
                                             init_once, NULL);
        btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
                                             sizeof(struct btrfs_trans_handle),
                                             0, (SLAB_RECLAIM_ACCOUNT|
                                                SLAB_MEM_SPREAD),
                                             NULL, NULL);
        btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
                                             sizeof(struct btrfs_transaction),
                                             0, (SLAB_RECLAIM_ACCOUNT|
                                                SLAB_MEM_SPREAD),
                                             NULL, NULL);
        btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
                                             sizeof(struct btrfs_path),
                                             0, (SLAB_RECLAIM_ACCOUNT|
                                                SLAB_MEM_SPREAD),
                                             NULL, NULL);
        btrfs_bit_radix_cachep = kmem_cache_create("btrfs_radix",
                                             256,
                                             0, (SLAB_RECLAIM_ACCOUNT|
                                                SLAB_MEM_SPREAD|
                                                SLAB_DESTROY_BY_RCU),
                                             NULL, NULL);
        if (btrfs_inode_cachep == NULL || btrfs_trans_handle_cachep == NULL ||
            btrfs_transaction_cachep == NULL ||
            btrfs_bit_radix_cachep == NULL || btrfs_path_cachep == NULL)
                return -ENOMEM;
        return 0;
}

static void destroy_inodecache(void)
{
        kmem_cache_destroy(btrfs_inode_cachep);
        kmem_cache_destroy(btrfs_trans_handle_cachep);
        kmem_cache_destroy(btrfs_transaction_cachep);
        kmem_cache_destroy(btrfs_bit_radix_cachep);
        kmem_cache_destroy(btrfs_path_cachep);
}
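
/*
 * Standard block-device mount path: let the VFS find or create the super
 * block and have btrfs_fill_super() do the filesystem specific setup.
 */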
static int btrfs_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
        return get_sb_bdev(fs_type, flags, dev_name, data,
                           btrfs_fill_super, mnt);
}
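
/*
 * getattr is mostly generic_fillattr(); st_blksize is reported as 256k,
 * presumably to encourage user space to issue larger I/Os than the
 * underlying block size.
 */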
static int btrfs_getattr(struct vfsmount *mnt,
                         struct dentry *dentry, struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;

        generic_fillattr(inode, stat);
        stat->blksize = 256 * 1024;
        return 0;
}
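
/*
 * statfs comes straight from the super block counters: total and used
 * blocks in units of s_blocksize.  No blocks are reserved for root here,
 * so f_bavail is the same as f_bfree.
 */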
static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct btrfs_root *root = btrfs_sb(dentry->d_sb);
        struct btrfs_super_block *disk_super = root->fs_info->disk_super;

        buf->f_namelen = BTRFS_NAME_LEN;
        buf->f_blocks = btrfs_super_total_blocks(disk_super);
        buf->f_bfree = buf->f_blocks - btrfs_super_blocks_used(disk_super);
        buf->f_bavail = buf->f_bfree;
        buf->f_bsize = dentry->d_sb->s_blocksize;
        buf->f_type = BTRFS_SUPER_MAGIC;
        return 0;
}
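
/*
 * Rename runs under fs_mutex in a single transaction.  Timestamps on both
 * directories and the inode are updated; when a directory moves to a new
 * parent its ".." entries (dir item and dir index item) are deleted and
 * re-inserted pointing at the new parent.  The old name is then unlinked,
 * any existing target inode is unlinked and its link count dropped (or
 * cleared for a directory), and finally the new link is added.
 */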
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        struct inode *new_dir, struct dentry *new_dentry)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(old_dir)->root;
        struct inode *new_inode = new_dentry->d_inode;
        struct inode *old_inode = old_dentry->d_inode;
        struct timespec ctime = CURRENT_TIME;
        struct btrfs_path *path;
        struct btrfs_dir_item *di;
        int ret;

        if (S_ISDIR(old_inode->i_mode) && new_inode &&
            new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
                return -ENOTEMPTY;
        mutex_lock(&root->fs_info->fs_mutex);
        trans = btrfs_start_transaction(root, 1);
        btrfs_set_trans_block_group(trans, new_dir);
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out_fail;
        }
        old_dentry->d_inode->i_nlink++;
        old_dir->i_ctime = old_dir->i_mtime = ctime;
        new_dir->i_ctime = new_dir->i_mtime = ctime;
        old_inode->i_ctime = ctime;
        if (S_ISDIR(old_inode->i_mode) && old_dir != new_dir) {
                struct btrfs_key *location = &BTRFS_I(new_dir)->location;
                u64 old_parent_oid;

                di = btrfs_lookup_dir_item(trans, root, path, old_inode->i_ino,
                                           "..", 2, -1);
                if (IS_ERR(di) || !di) {
                        ret = di ? PTR_ERR(di) : -ENOENT;
                        goto out_fail;
                }
                old_parent_oid = btrfs_disk_key_objectid(&di->location);
                ret = btrfs_del_item(trans, root, path);
                if (ret)
                        goto out_fail;
                btrfs_release_path(root, path);
                di = btrfs_lookup_dir_index_item(trans, root, path,
                                                 old_inode->i_ino,
                                                 old_parent_oid,
                                                 "..", 2, -1);
                if (IS_ERR(di) || !di) {
                        ret = di ? PTR_ERR(di) : -ENOENT;
                        goto out_fail;
                }
                ret = btrfs_del_item(trans, root, path);
                if (ret)
                        goto out_fail;
                btrfs_release_path(root, path);
                ret = btrfs_insert_dir_item(trans, root, "..", 2,
                                            old_inode->i_ino, location, 0);
                if (ret)
                        goto out_fail;
        }
        ret = btrfs_unlink_trans(trans, root, old_dir, old_dentry);
        if (ret)
                goto out_fail;
        if (new_inode) {
                new_inode->i_ctime = CURRENT_TIME;
                ret = btrfs_unlink_trans(trans, root, new_dir, new_dentry);
                if (ret)
                        goto out_fail;
                if (S_ISDIR(new_inode->i_mode))
                        clear_nlink(new_inode);
                else
                        drop_nlink(new_inode);
                btrfs_update_inode(trans, root, new_inode);
        }
        ret = btrfs_add_link(trans, new_dentry, old_inode);
out_fail:
        if (path)
                btrfs_free_path(path);
        btrfs_end_transaction(trans, root);
        mutex_unlock(&root->fs_info->fs_mutex);
        return ret;
}
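
/*
 * Symlinks are stored as ordinary inodes whose data is a single inline
 * file extent (BTRFS_FILE_EXTENT_INLINE) holding the target string, which
 * is why targets longer than BTRFS_MAX_INLINE_DATA_SIZE() are rejected
 * with -ENAMETOOLONG.  i_size is the target length without the trailing
 * NUL, even though the NUL is copied into the extent.
 */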
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                         const char *symname)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(dir)->root;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct inode *inode;
        struct btrfs_file_extent_item *ei;
        char *ptr;
        u64 objectid;
        int name_len;
        int datasize;
        int drop_inode = 0;
        int err;

        name_len = strlen(symname) + 1;
        if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
                return -ENAMETOOLONG;
        mutex_lock(&root->fs_info->fs_mutex);
        trans = btrfs_start_transaction(root, 1);
        btrfs_set_trans_block_group(trans, dir);

        err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
        if (err) {
                err = -ENOSPC;
                goto out_unlock;
        }

        inode = btrfs_new_inode(trans, root, objectid,
                                BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO);
        err = PTR_ERR(inode);
        if (IS_ERR(inode))
                goto out_unlock;

        btrfs_set_trans_block_group(trans, inode);
        err = btrfs_add_nondir(trans, dentry, inode);
        if (err)
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_fop = &btrfs_file_operations;
                inode->i_op = &btrfs_file_inode_operations;
        }
        dir->i_sb->s_dirt = 1;
        btrfs_update_inode_block_group(trans, inode);
        btrfs_update_inode_block_group(trans, dir);
        if (drop_inode)
                goto out_unlock;

        path = btrfs_alloc_path();
        if (!path) {
                err = -ENOMEM;
                drop_inode = 1;
                goto out_unlock;
        }
        key.objectid = inode->i_ino;
        key.offset = 0;
        key.flags = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        datasize = btrfs_file_extent_calc_inline_size(name_len);
        err = btrfs_insert_empty_item(trans, root, path, &key,
                                      datasize);
        if (err) {
                drop_inode = 1;
                btrfs_free_path(path);
                goto out_unlock;
        }
        ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
                            path->slots[0], struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(ei, trans->transid);
        btrfs_set_file_extent_type(ei, BTRFS_FILE_EXTENT_INLINE);
        ptr = btrfs_file_extent_inline_start(ei);
        btrfs_memcpy(root, path->nodes[0]->b_data,
                     ptr, symname, name_len);
        mark_buffer_dirty(path->nodes[0]);
        btrfs_free_path(path);
        inode->i_op = &btrfs_symlink_inode_operations;
        inode->i_mapping->a_ops = &btrfs_symlink_aops;
        inode->i_size = name_len - 1;
        btrfs_update_inode(trans, root, inode);

out_unlock:
        btrfs_end_transaction(trans, root);
        mutex_unlock(&root->fs_info->fs_mutex);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
        }
        btrfs_btree_balance_dirty(root);
        return err;
}
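
/*
 * Filesystem registration, followed by the operation tables that
 * btrfs_read_locked_inode() wires up for each inode type.
 */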
static struct file_system_type btrfs_fs_type = {
        .owner = THIS_MODULE,
        .name = "btrfs",
        .get_sb = btrfs_get_sb,
        .kill_sb = kill_block_super,
        .fs_flags = FS_REQUIRES_DEV,
};

static struct super_operations btrfs_super_ops = {
        .delete_inode = btrfs_delete_inode,
        .put_super = btrfs_put_super,
        .read_inode = btrfs_read_locked_inode,
        .write_super = btrfs_write_super,
        .sync_fs = btrfs_sync_fs,
        .write_inode = btrfs_write_inode,
        .dirty_inode = btrfs_dirty_inode,
        .alloc_inode = btrfs_alloc_inode,
        .destroy_inode = btrfs_destroy_inode,
        .statfs = btrfs_statfs,
};

static struct inode_operations btrfs_dir_inode_operations = {
        .lookup = btrfs_lookup,
        .create = btrfs_create,
        .unlink = btrfs_unlink,
        .mkdir = btrfs_mkdir,
        .rmdir = btrfs_rmdir,
        .rename = btrfs_rename,
        .symlink = btrfs_symlink,
        .setattr = btrfs_setattr,
};

static struct inode_operations btrfs_dir_ro_inode_operations = {
        .lookup = btrfs_lookup,
};

static struct file_operations btrfs_dir_file_operations = {
        .llseek = generic_file_llseek,
        .read = generic_read_dir,
        .readdir = btrfs_readdir,
        .ioctl = btrfs_ioctl,
};

static struct address_space_operations btrfs_aops = {
        .readpage = btrfs_readpage,
        .writepage = btrfs_writepage,
        .sync_page = block_sync_page,
        .prepare_write = btrfs_prepare_write,
        .commit_write = btrfs_commit_write,
};

static struct address_space_operations btrfs_symlink_aops = {
        .readpage = btrfs_readpage,
        .writepage = btrfs_writepage,
};

static struct inode_operations btrfs_file_inode_operations = {
        .truncate = btrfs_truncate,
        .getattr = btrfs_getattr,
        .setattr = btrfs_setattr,
};

static struct file_operations btrfs_file_operations = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
        .aio_read = btrfs_file_aio_read,
        .write = btrfs_file_write,
        .mmap = generic_file_mmap,
        .open = generic_file_open,
        .ioctl = btrfs_ioctl,
        .fsync = btrfs_sync_file,
};

static struct inode_operations btrfs_symlink_inode_operations = {
        .readlink = generic_readlink,
        .follow_link = page_follow_link_light,
        .put_link = page_put_link,
};
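
/*
 * Module init creates the slab caches and then registers the filesystem;
 * exit unregisters it and destroys the caches.
 */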
static int __init init_btrfs_fs(void)
{
        int err;

        printk("btrfs loaded!\n");
        err = init_inodecache();
        if (err)
                return err;
        err = register_filesystem(&btrfs_fs_type);
        if (err)
                destroy_inodecache();
        return err;
}

static void __exit exit_btrfs_fs(void)
{
        destroy_inodecache();
        unregister_filesystem(&btrfs_fs_type);
        printk("btrfs unloaded\n");
}

module_init(init_btrfs_fs)
module_exit(exit_btrfs_fs)

MODULE_LICENSE("GPL");