/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>
/* this helper combines the low and high parts of a physical block number into an ext4_fsblk_t */
static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
        ext4_fsblk_t block;

        block = le32_to_cpu(ex->ee_start);
        if (sizeof(ext4_fsblk_t) > 4)
                block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
        return block;
}
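/*
 * Note: the 16-bit high part is shifted in two steps ("<< 31" then "<< 1")
 * rather than by a single "<< 32".  A plausible reading (our annotation,
 * not from the original source): when ext4_fsblk_t is only 4 bytes wide,
 * a literal shift by 32 would be undefined and would draw compiler
 * warnings even though the branch is dead; the split shift stays defined.
 */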
/* this helper combines the low and high parts of a physical block number into an ext4_fsblk_t */
static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
        ext4_fsblk_t block;

        block = le32_to_cpu(ix->ei_leaf);
        if (sizeof(ext4_fsblk_t) > 4)
                block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
        return block;
}
/* this helper stores a large physical block number in an extent, breaking it into parts */
static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
        ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
        if (sizeof(ext4_fsblk_t) > 4)
                ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
/* this helper stores a large physical block number in an index, breaking it into parts */
static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
        ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
        if (sizeof(ext4_fsblk_t) > 4)
                ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
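/*
 * Illustrative round trip (our example values, not from the original
 * source): for pb = 0x0123456789ab, ext4_ext_store_pblock() leaves
 * ee_start = cpu_to_le32(0x456789ab) and ee_start_hi = cpu_to_le16(0x0123);
 * ext_pblock() then reassembles 0x0123456789ab.  Physical block numbers
 * are thus limited to 48 bits.
 */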
static int ext4_ext_check_header(const char *function, struct inode *inode,
                                struct ext4_extent_header *eh)
        const char *error_msg = NULL;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
        ext4_error(inode->i_sb, function,
                   "bad header in inode #%lu: %s - magic %x, "
                   "entries %u, max %u, depth %u",
                   inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
                   le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                   le16_to_cpu(eh->eh_depth));
static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
        if (handle->h_buffer_credits > needed)
        if (!ext4_journal_extend(handle, needed))
        err = ext4_journal_restart(handle, needed);
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
        /* path points to block */
        return ext4_journal_get_write_access(handle, path->p_bh);
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */

static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
        /* path points to block */
        err = ext4_journal_dirty_metadata(handle, path->p_bh);
        /* path points to leaf/index in inode body */
        err = ext4_mark_inode_dirty(handle, inode);
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                                struct ext4_ext_path *path,
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_fsblk_t bg_start;
        ext4_grpblk_t colour;
                struct ext4_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                if ((ex = path[depth].p_ext))
                        return ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));

                /* it looks like the index is empty;
                 * try to find a starting block from the index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;

        /* OK. use the inode's group */
        bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
        colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour + block;
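/*
 * Worked example (our illustration): with a 4KB block size a group has
 * 32768 blocks, so each of the 16 PID-based "colour" slots is 2048 blocks
 * wide.  A task with current->pid == 1005 lands in slot 1005 % 16 == 13
 * and gets goal offset 13 * 2048 == 26624 within the inode's group.
 */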
ext4_ext_new_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err)
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_block(handle, inode, goal, err);
static inline int ext4_ext_space_block(struct inode *inode)
        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST

static inline int ext4_ext_space_block_idx(struct inode *inode)
        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST

static inline int ext4_ext_space_root(struct inode *inode)
        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST

static inline int ext4_ext_space_root_idx(struct inode *inode)
        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST
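/*
 * Capacity arithmetic (our annotation): the header, extent and index
 * entries are 12 bytes each on disk, so a 4KB block holds
 * (4096 - 12) / 12 == 340 extents or index entries, while the 60-byte
 * i_data root holds (60 - 12) / 12 == 4.  These are the figures behind
 * the "340^4" credit estimate further down.
 */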
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
        int k, l = path->p_depth;

        for (k = 0; k <= l; k++, path++) {
                        ext_debug("  %d->"E3FSBLK, le32_to_cpu(path->p_idx->ei_block),
                                  idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:%d:"E3FSBLK" ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  le16_to_cpu(path->p_ext->ee_len),
                                  ext_pblock(path->p_ext));

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:%d:"E3FSBLK" ", le32_to_cpu(ex->ee_block),
                          le16_to_cpu(ex->ee_len), ext_pblock(ex));
#else
#define ext4_ext_show_path(inode,path)
#define ext4_ext_show_leaf(inode,path)
#endif
static void ext4_ext_drop_refs(struct ext4_ext_path *path)
        int depth = path->p_depth;

        for (i = 0; i <= depth; i++, path++)
/*
 * binary search for the closest index for the given block
 */
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;

        BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
        BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
        BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);

        ext_debug("binsearch for %d(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
                if (block < le32_to_cpu(m->ei_block))
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
                          m, m->ei_block, r, r->ei_block);

        ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
        struct ext4_extent_idx *chix, *ix;

        chix = ix = EXT_FIRST_INDEX(eh);
        for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
                        printk("k=%d, ix=0x%p, first=0x%p\n", k,
                               ix, EXT_FIRST_INDEX(eh));
                               le32_to_cpu(ix->ei_block),
                               le32_to_cpu(ix[-1].ei_block));
                BUG_ON(k && le32_to_cpu(ix->ei_block)
                           <= le32_to_cpu(ix[-1].ei_block));
                if (block < le32_to_cpu(ix->ei_block))

        BUG_ON(chix != path->p_idx);
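/*
 * Why the search starts at EXT_FIRST_INDEX(eh) + 1 (our annotation): the
 * first index always covers every block below its key, so it can serve as
 * the fallback answer.  The loop narrows [l, r] until r < l, at which
 * point l - 1 is the rightmost entry with ei_block <= block; the extent
 * search below has the same shape.
 */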
/*
 * binary search for the closest extent for the given block
 */
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
        BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is still empty:
                 * we get such a leaf in the split/add case
                 */

        ext_debug("binsearch for %d:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;
                if (block < le32_to_cpu(m->ee_block))
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
                          m, m->ee_block, r, r->ee_block);

        ext_debug("  -> %d:"E3FSBLK":%d ",
                  le32_to_cpu(path->p_ext->ee_block),
                  ext_pblock(path->p_ext),
                  le16_to_cpu(path->p_ext->ee_len));

#ifdef CHECK_BINSEARCH
        struct ext4_extent *chex, *ex;

        chex = ex = EXT_FIRST_EXTENT(eh);
        for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                BUG_ON(k && le32_to_cpu(ex->ee_block)
                          <= le32_to_cpu(ex[-1].ee_block));
                if (block < le32_to_cpu(ex->ee_block))

        BUG_ON(chex != path->p_ext);
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
        ext4_mark_inode_dirty(handle, inode);
        ext4_ext_invalidate_cache(inode);
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;

        eh = ext_inode_hdr(inode);
        if (ext4_ext_check_header(__FUNCTION__, inode, eh))
                return ERR_PTR(-EIO);

        i = depth = ext_depth(inode);

        /* account for possible depth increase */
                path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                        return ERR_PTR(-ENOMEM);
        memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));

        /* walk through the tree */
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = sb_bread(inode->i_sb, path[ppos].p_block);
                eh = ext_block_hdr(bh);
                BUG_ON(ppos > depth);
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                if (ext4_ext_check_header(__FUNCTION__, inode, eh))

        path[ppos].p_depth = i;
        path[ppos].p_hdr = eh;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;
        if (ext4_ext_check_header(__FUNCTION__, inode, eh))

        ext4_ext_binsearch(inode, path + ppos, block);

        ext4_ext_show_path(inode, path);

        ext4_ext_drop_refs(path);
        return ERR_PTR(-EIO);
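/*
 * Layout reminder (our annotation): a path[] array describes one
 * root-to-leaf walk.  path[0] is the root header held in the inode's
 * i_data, path[depth] is the leaf; every element caches the block's
 * buffer_head, header pointer and the index/extent entry that the
 * binary search selected.
 */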
/*
 * insert new index [logical;ptr] into the block at curp;
 * it checks where to insert: before curp or after curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *curp,
                                int logical, ext4_fsblk_t ptr)
        struct ext4_extent_idx *ix;

        if ((err = ext4_ext_get_access(handle, inode, curp)))
        BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
        len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
                        len = (len - 1) * sizeof(struct ext4_extent_idx);
                        len = len < 0 ? 0 : len;
                        ext_debug("insert new index %d after: %d. "
                                  "move %d from 0x%p to 0x%p\n",
                                  (curp->p_idx + 1), (curp->p_idx + 2));
                        memmove(curp->p_idx + 2, curp->p_idx + 1, len);
                ix = curp->p_idx + 1;
                len = len * sizeof(struct ext4_extent_idx);
                len = len < 0 ? 0 : len;
                ext_debug("insert new index %d before: %d. "
                          "move %d from 0x%p to 0x%p\n",
                          curp->p_idx, (curp->p_idx + 1));
                memmove(curp->p_idx + 1, curp->p_idx, len);

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries) + 1);

        BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
                             > le16_to_cpu(curp->p_hdr->eh_max));
        BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);
/*
 * routine inserts new subtree into the path, using free index entry:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right of the split point)
 *   into the newly allocated blocks
 * - initializes the subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_extent *newext, int at)
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        struct ext4_extent *ex;
        ext4_fsblk_t newblock, oldblock;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */

        /* make decision: where to split? */
        /* FIXME: for now the decision is the simplest one: at the current extent */

        /* if the current leaf will be split, then we should use
         * the border from the split point */
        BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                          " next leaf starts at %d\n",
                          le32_to_cpu(border));
                border = newext->ee_block;
                ext_debug("leaf will be added."
                          " next leaf starts at %d\n",
                          le32_to_cpu(border));
        /*
         * if an error occurs, then we break processing
         * and turn the filesystem read-only. so, the index won't
         * be inserted and the tree will remain in a consistent
         * state. the next mount will repair buffers too
         */

        /*
         * get an array to track all allocated blocks;
         * we need this to handle errors and free blocks
         */
        ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth);
        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
                ablocks[a] = newblock;

        /* initialize new leaf */
        newblock = ablocks[--a];
        BUG_ON(newblock == 0);
        bh = sb_getblk(inode->i_sb, newblock);
        if ((err = ext4_journal_get_create_access(handle, bh)))

        neh = ext_block_hdr(bh);
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
        neh->eh_magic = EXT4_EXT_MAGIC;
        ex = EXT_FIRST_EXTENT(neh);

        /* move the remainder of path[depth] to the new leaf */
        BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
        /* start copying from the next extent */
        /* TODO: we could do it with a single memmove */
        while (path[depth].p_ext <=
                        EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:"E3FSBLK":%d in new leaf "E3FSBLK"\n",
                          le32_to_cpu(path[depth].p_ext->ee_block),
                          ext_pblock(path[depth].p_ext),
                          le16_to_cpu(path[depth].p_ext->ee_len),
                /*memmove(ex++, path[depth].p_ext++,
                        sizeof(struct ext4_extent));
        memmove(ex, path[depth].p_ext - m, sizeof(struct ext4_extent) * m);
        neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);

        set_buffer_uptodate(bh);
        if ((err = ext4_journal_dirty_metadata(handle, bh)))
        /* correct old leaf */
        if ((err = ext4_ext_get_access(handle, inode, path + depth)))
        path[depth].p_hdr->eh_entries =
                cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries) - m);
        if ((err = ext4_ext_dirty(handle, inode, path + depth)))

        /* create intermediate indexes */
        ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
                if ((err = ext4_journal_get_create_access(handle, bh)))

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block "E3FSBLK"): %lu -> "E3FSBLK"\n", i,
                          newblock, (unsigned long) le32_to_cpu(border),
                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                          EXT_MAX_INDEX(path[i].p_hdr));
                BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
                       EXT_LAST_INDEX(path[i].p_hdr));
                while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
                        ext_debug("%d: move %d:%d in new index "E3FSBLK"\n", i,
                                  le32_to_cpu(path[i].p_idx->ei_block),
                                  idx_pblock(path[i].p_idx),
                        /*memmove(++fidx, path[i].p_idx++,
                                sizeof(struct ext4_extent_idx));
                        BUG_ON(neh->eh_entries > neh->eh_max);*/
                memmove(++fidx, path[i].p_idx - m,
                        sizeof(struct ext4_extent_idx) * m);
                        cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);

                set_buffer_uptodate(bh);
                if ((err = ext4_journal_dirty_metadata(handle, bh)))

                /* correct old index */
                        err = ext4_ext_get_access(handle, inode, path + i);
                        path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries) - m);
                        err = ext4_ext_dirty(handle, inode, path + i);

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

        if (buffer_locked(bh))

        /* free all allocated blocks in the error case */
        for (i = 0; i < depth; i++) {
                ext4_free_blocks(handle, inode, ablocks[i], 1);
/*
 * routine implements the tree growing procedure:
 * - allocates a new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating an index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_extent *newext)
        struct ext4_ext_path *curp = path;
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        struct buffer_head *bh;
        ext4_fsblk_t newblock;

        newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
        bh = sb_getblk(inode->i_sb, newblock);
                ext4_std_error(inode->i_sb, err);

        if ((err = ext4_journal_get_create_access(handle, bh))) {

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* the old root could hold indexes or leaves,
         * so calculate eh_max the right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
        neh->eh_magic = EXT4_EXT_MAGIC;
        set_buffer_uptodate(bh);
        if ((err = ext4_journal_dirty_metadata(handle, bh)))

        /* create index in new top-level index: num,max,pointer */
        if ((err = ext4_ext_get_access(handle, inode, curp)))
        curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
        curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
        curp->p_hdr->eh_entries = cpu_to_le16(1);
        curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
        /* FIXME: it works, but actually path[0] can be an index */
        curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
        ext4_idx_store_pblock(curp->p_idx, newblock);

        neh = ext_inode_hdr(inode);
        fidx = EXT_FIRST_INDEX(neh);
        ext_debug("new root: num %d(%d), lblock %d, ptr "E3FSBLK"\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

        neh->eh_depth = cpu_to_le16(path->p_depth + 1);
        err = ext4_ext_dirty(handle, inode, curp);
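/*
 * Shape of the operation (our annotation): the root always stays in the
 * inode's i_data.  Growing copies the current root body into a freshly
 * allocated block and rewrites the in-inode root as a single index entry
 * pointing at that block, so eh_depth goes from N to N + 1 without the
 * root itself ever moving.
 */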
/*
 * routine finds an empty index and adds the new leaf. if no free index
 * is found, then it requests growing the tree in depth
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_extent *newext)
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

        i = depth = ext_depth(inode);

        /* walk up the tree and look for a free index entry */
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {

        /* we use the already allocated block for the index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found an index with a free entry, then use that
                 * entry: create all needed subtree and add the new leaf */
                err = ext4_ext_split(handle, inode, path, newext, i);

                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                            le32_to_cpu(newext->ee_block),
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, path, newext);

                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                            le32_to_cpu(newext->ee_block),

                /*
                 * only the first grow (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need a split */
/*
 * returns the allocated block in a subsequent extent, or EXT_MAX_BLOCK.
 * NOTE: it considers the block number from an index entry as an
 * allocated block. thus, index entries have to be consistent
 */
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
        BUG_ON(path == NULL);
        depth = path->p_depth;

        if (depth == 0 && path->p_ext == NULL)
                return EXT_MAX_BLOCK;

                if (depth == path->p_depth) {
                        if (path[depth].p_ext !=
                                        EXT_LAST_EXTENT(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_ext[1].ee_block);
                        if (path[depth].p_idx !=
                                        EXT_LAST_INDEX(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_idx[1].ei_block);

        return EXT_MAX_BLOCK;
/*
 * returns the first allocated block from the next leaf, or EXT_MAX_BLOCK
 */
static unsigned ext4_ext_next_leaf_block(struct inode *inode,
                                        struct ext4_ext_path *path)
        BUG_ON(path == NULL);
        depth = path->p_depth;

        /* a zero-depth tree has no leaf blocks at all */
                return EXT_MAX_BLOCK;

        /* go to the index block */
        while (depth >= 0) {
                if (path[depth].p_idx !=
                                EXT_LAST_INDEX(path[depth].p_hdr))
                        return le32_to_cpu(path[depth].p_idx[1].ei_block);

        return EXT_MAX_BLOCK;
/*
 * if the leaf gets modified and the modified extent is first in the leaf,
 * then we have to correct all the indexes above.
 * TODO: do we need to correct the tree in all cases?
 */
int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
        struct ext4_extent_header *eh;
        int depth = ext_depth(inode);
        struct ext4_extent *ex;

        eh = path[depth].p_hdr;
        ex = path[depth].p_ext;

                /* there is no tree at all */

        if (ex != EXT_FIRST_EXTENT(eh)) {
                /* we correct the tree only if the first leaf extent was modified */

        /*
         * TODO: we need a correction if the border is smaller than the current one
         */
        border = path[depth].p_ext->ee_block;
        if ((err = ext4_ext_get_access(handle, inode, path + k)))
        path[k].p_idx->ei_block = border;
        if ((err = ext4_ext_dirty(handle, inode, path + k)))

                /* change all left-side indexes */
                if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
                if ((err = ext4_ext_get_access(handle, inode, path + k)))
                path[k].p_idx->ei_block = border;
                if ((err = ext4_ext_dirty(handle, inode, path + k)))
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
                                struct ext4_extent *ex2)
        if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len)
                        != le32_to_cpu(ex2->ee_block))

        /*
         * To allow future support for preallocated extents to be added
         * as an RO_COMPAT feature, refuse to merge two extents if it
         * can result in the top bit of ee_len being set
         */
        if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
#ifdef AGRESSIVE_TEST
        if (le16_to_cpu(ex1->ee_len) >= 4)
        if (ext_pblock(ex1) + le16_to_cpu(ex1->ee_len) == ext_pblock(ex2))
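/*
 * Merge conditions in one worked example (ours): ex1 = [100..107] mapped
 * at physical 5000 (ee_len 8) and ex2 = [108..111] at physical 5008
 * (ee_len 4) are logically contiguous (100 + 8 == 108) and physically
 * contiguous (5000 + 8 == 5008), and 8 + 4 stays under EXT_MAX_LEN, so
 * they can collapse into [100..111] at 5000 with ee_len 12.
 */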
/*
 * this routine tries to merge the requested extent into an existing
 * extent, or inserts the requested extent as a new one into the tree,
 * creating a new leaf in the no-space case
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_extent *newext)
        struct ext4_extent_header *eh;
        struct ext4_extent *ex, *fex;
        struct ext4_extent *nearex; /* nearest extent */
        struct ext4_ext_path *npath = NULL;
        int depth, len, err, next;

        BUG_ON(newext->ee_len == 0);
        depth = ext_depth(inode);
        ex = path[depth].p_ext;
        BUG_ON(path[depth].p_hdr == NULL);

        /* try to insert the block into the found extent and return */
        if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
                ext_debug("append %d block to %d:%d (from "E3FSBLK")\n",
                          le16_to_cpu(newext->ee_len),
                          le32_to_cpu(ex->ee_block),
                          le16_to_cpu(ex->ee_len), ext_pblock(ex));
                if ((err = ext4_ext_get_access(handle, inode, path + depth)))
                ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
                                         + le16_to_cpu(newext->ee_len));
                eh = path[depth].p_hdr;

        depth = ext_depth(inode);
        eh = path[depth].p_hdr;
        if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))

        /* probably the next leaf has space for us? */
        fex = EXT_LAST_EXTENT(eh);
        next = ext4_ext_next_leaf_block(inode, path);
        if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
            && next != EXT_MAX_BLOCK) {
                ext_debug("next leaf block - %d\n", next);
                BUG_ON(npath != NULL);
                npath = ext4_ext_find_extent(inode, next, NULL);
                        return PTR_ERR(npath);
                BUG_ON(npath->p_depth != path->p_depth);
                eh = npath[depth].p_hdr;
                if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
                        ext_debug("next leaf isn't full(%d)\n",
                                  le16_to_cpu(eh->eh_entries));
                ext_debug("next leaf has no free space(%d,%d)\n",
                          le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
        /*
         * there is no free space in the found leaf;
         * we're going to add a new leaf to the tree
         */
        err = ext4_ext_create_new_leaf(handle, inode, path, newext);
        depth = ext_depth(inode);
        eh = path[depth].p_hdr;

        nearex = path[depth].p_ext;
        if ((err = ext4_ext_get_access(handle, inode, path + depth)))

                /* there is no extent in this leaf, create the first one */
                ext_debug("first extent in the leaf: %d:"E3FSBLK":%d\n",
                          le32_to_cpu(newext->ee_block),
                          le16_to_cpu(newext->ee_len));
                path[depth].p_ext = EXT_FIRST_EXTENT(eh);
        } else if (le32_to_cpu(newext->ee_block)
                   > le32_to_cpu(nearex->ee_block)) {
                /* BUG_ON(newext->ee_block == nearex->ee_block); */
                if (nearex != EXT_LAST_EXTENT(eh)) {
                        len = EXT_MAX_EXTENT(eh) - nearex;
                        len = (len - 1) * sizeof(struct ext4_extent);
                        len = len < 0 ? 0 : len;
                        ext_debug("insert %d:"E3FSBLK":%d after: nearest 0x%p, "
                                  "move %d from 0x%p to 0x%p\n",
                                  le32_to_cpu(newext->ee_block),
                                  le16_to_cpu(newext->ee_len),
                                  nearex, len, nearex + 1, nearex + 2);
                        memmove(nearex + 2, nearex + 1, len);
                path[depth].p_ext = nearex + 1;
                BUG_ON(newext->ee_block == nearex->ee_block);
                len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
                len = len < 0 ? 0 : len;
                ext_debug("insert %d:"E3FSBLK":%d before: nearest 0x%p, "
                          "move %d from 0x%p to 0x%p\n",
                          le32_to_cpu(newext->ee_block),
                          le16_to_cpu(newext->ee_len),
                          nearex, len, nearex + 1, nearex + 2);
                memmove(nearex + 1, nearex, len);
                path[depth].p_ext = nearex;

        eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) + 1);
        nearex = path[depth].p_ext;
        nearex->ee_block = newext->ee_block;
        nearex->ee_start = newext->ee_start;
        nearex->ee_start_hi = newext->ee_start_hi;
        nearex->ee_len = newext->ee_len;

        /* try to merge extents to the right */
        while (nearex < EXT_LAST_EXTENT(eh)) {
                if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
                /* merge with next extent! */
                nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
                                             + le16_to_cpu(nearex[1].ee_len));
                if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
                        len = (EXT_LAST_EXTENT(eh) - nearex - 1)
                                        * sizeof(struct ext4_extent);
                        memmove(nearex + 1, nearex + 2, len);
                eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
                BUG_ON(eh->eh_entries == 0);

        /* try to merge extents to the left */

        /* time to correct all indexes above */
        err = ext4_ext_correct_indexes(handle, inode, path);

        err = ext4_ext_dirty(handle, inode, path + depth);

        ext4_ext_drop_refs(npath);
        ext4_ext_tree_changed(inode);
        ext4_ext_invalidate_cache(inode);
int ext4_ext_walk_space(struct inode *inode, unsigned long block,
                        unsigned long num, ext_prepare_callback func,
        struct ext4_ext_path *path = NULL;
        struct ext4_ext_cache cbex;
        struct ext4_extent *ex;
        unsigned long next, start = 0, end = 0;
        unsigned long last = block + num;
        int depth, exists, err = 0;

        BUG_ON(func == NULL);
        BUG_ON(inode == NULL);

        while (block < last && block != EXT_MAX_BLOCK) {
                /* find extent for this block */
                path = ext4_ext_find_extent(inode, block, path);
                        err = PTR_ERR(path);

                depth = ext_depth(inode);
                BUG_ON(path[depth].p_hdr == NULL);
                ex = path[depth].p_ext;
                next = ext4_ext_next_allocated_block(path);

                        /* there is no extent yet, so try to allocate
                         * all requested space */
                } else if (le32_to_cpu(ex->ee_block) > block) {
                        /* need to allocate space before found extent */
                        end = le32_to_cpu(ex->ee_block);
                        if (block + num < end)
                           le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
                        /* need to allocate space after found extent */
                } else if (block >= le32_to_cpu(ex->ee_block)) {
                        /*
                         * some part of requested space is covered
                         */
                        end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
                        if (block + num < end)

                BUG_ON(end <= start);

                        cbex.ec_block = start;
                        cbex.ec_len = end - start;
                        cbex.ec_type = EXT4_EXT_CACHE_GAP;
                        cbex.ec_block = le32_to_cpu(ex->ee_block);
                        cbex.ec_len = le16_to_cpu(ex->ee_len);
                        cbex.ec_start = ext_pblock(ex);
                        cbex.ec_type = EXT4_EXT_CACHE_EXTENT;

                BUG_ON(cbex.ec_len == 0);
                err = func(inode, path, &cbex, cbdata);
                ext4_ext_drop_refs(path);

                if (err == EXT_REPEAT)
                else if (err == EXT_BREAK) {

                if (ext_depth(inode) != depth) {
                        /* depth was changed. we have to realloc path */

                block = cbex.ec_block + cbex.ec_len;

        ext4_ext_drop_refs(path);
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
                        __u32 len, __u32 start, int type)
        struct ext4_ext_cache *cex;

        cex = &EXT4_I(inode)->i_cached_extent;
        cex->ec_type = type;
        cex->ec_block = block;
        cex->ec_start = start;
/*
 * this routine calculates the boundaries of the gap that the requested
 * block fits into, and caches this gap
 */
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
                                unsigned long block)
        int depth = ext_depth(inode);
        unsigned long lblock, len;
        struct ext4_extent *ex;

        ex = path[depth].p_ext;
                /* there is no extent yet, so the gap is [0;-] */
                len = EXT_MAX_BLOCK;
                ext_debug("cache gap(whole file):");
        } else if (block < le32_to_cpu(ex->ee_block)) {
                len = le32_to_cpu(ex->ee_block) - block;
                ext_debug("cache gap(before): %lu [%lu:%lu]",
                          (unsigned long) block,
                          (unsigned long) le32_to_cpu(ex->ee_block),
                          (unsigned long) le16_to_cpu(ex->ee_len));
        } else if (block >= le32_to_cpu(ex->ee_block)
                   + le16_to_cpu(ex->ee_len)) {
                lblock = le32_to_cpu(ex->ee_block)
                         + le16_to_cpu(ex->ee_len);
                len = ext4_ext_next_allocated_block(path);
                ext_debug("cache gap(after): [%lu:%lu] %lu",
                          (unsigned long) le32_to_cpu(ex->ee_block),
                          (unsigned long) le16_to_cpu(ex->ee_len),
                          (unsigned long) block);
                BUG_ON(len == lblock);

        ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
        ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
ext4_ext_in_cache(struct inode *inode, unsigned long block,
                  struct ext4_extent *ex)
        struct ext4_ext_cache *cex;

        cex = &EXT4_I(inode)->i_cached_extent;

        /* does the cache hold valid data? */
        if (cex->ec_type == EXT4_EXT_CACHE_NO)
                return EXT4_EXT_CACHE_NO;

        BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
               cex->ec_type != EXT4_EXT_CACHE_EXTENT);
        if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
                ex->ee_block = cpu_to_le32(cex->ec_block);
                ext4_ext_store_pblock(ex, cex->ec_start);
                ex->ee_len = cpu_to_le16(cex->ec_len);
                ext_debug("%lu cached by %lu:%lu:"E3FSBLK"\n",
                          (unsigned long) block,
                          (unsigned long) cex->ec_block,
                          (unsigned long) cex->ec_len,
                return cex->ec_type;

        return EXT4_EXT_CACHE_NO;
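/*
 * Cache semantics (our annotation): i_cached_extent is a single slot per
 * inode remembering the last extent or gap seen.  A GAP hit tells the
 * caller the block is a hole without re-walking the tree; an EXTENT hit
 * returns the mapping directly.  Any tree change must invalidate the slot
 * via ext4_ext_invalidate_cache().
 */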
/*
 * routine removes an index from the index block.
 * it's used only in the truncate case; thus all requests are for
 * the last index in the block only
 */
int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path)
        struct buffer_head *bh;

        /* free index block */
        leaf = idx_pblock(path->p_idx);
        BUG_ON(path->p_hdr->eh_entries == 0);
        if ((err = ext4_ext_get_access(handle, inode, path)))
        path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries) - 1);
        if ((err = ext4_ext_dirty(handle, inode, path)))
        ext_debug("index is empty, remove it, free block "E3FSBLK"\n", leaf);
        bh = sb_find_get_block(inode->i_sb, leaf);
        ext4_forget(handle, 1, inode, bh, leaf);
        ext4_free_blocks(handle, inode, leaf, 1);
/*
 * This routine returns the max. credits the extent tree can consume.
 * It should be OK for low-performance paths like ->writepage().
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under truncate_mutex and
 * pass the actual path.
 */
int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
                                                struct ext4_ext_path *path)
                /* probably there is space in the leaf? */
                depth = ext_depth(inode);
                if (le16_to_cpu(path[depth].p_hdr->eh_entries)
                                < le16_to_cpu(path[depth].p_hdr->eh_max))

        /*
         * given a 32bit logical block (4294967296 blocks), the max. tree
         * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
         * let's also add one more level for imbalance.
         */

        /* allocation of new data block(s) */

        /*
         * tree can be full, so it'd need to grow in depth:
         * allocation + old root + new root
         */
        needed += 2 + 1 + 1;

        /*
         * Index split can happen, we'd need:
         *    to allocate intermediate indexes (bitmap + group)
         *  + to change two blocks at each level, but root (already included)
         */
        needed = (depth * 2) + (depth * 2);

        /* any allocation modifies the superblock */
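/*
 * Worked total (our arithmetic, following the terms above): if the depth
 * estimate works out to 5 (4 levels plus one for imbalance, per the
 * comment above), the index-split term alone is (5 * 2) + (5 * 2) == 20
 * credits, plus 1 for the superblock.  Note the split term is assigned
 * with "=", so the earlier data-block and grow-in-depth terms do not
 * survive into the final sum as this excerpt stands.
 */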
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
                                struct ext4_extent *ex,
                                unsigned long from, unsigned long to)
        struct buffer_head *bh;

#ifdef EXTENTS_STATS
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        unsigned short ee_len = le16_to_cpu(ex->ee_len);
        spin_lock(&sbi->s_ext_stats_lock);
        sbi->s_ext_blocks += ee_len;
        sbi->s_ext_extents++;
        if (ee_len < sbi->s_ext_min)
                sbi->s_ext_min = ee_len;
        if (ee_len > sbi->s_ext_max)
                sbi->s_ext_max = ee_len;
        if (ext_depth(inode) > sbi->s_depth_max)
                sbi->s_depth_max = ext_depth(inode);
        spin_unlock(&sbi->s_ext_stats_lock);

        if (from >= le32_to_cpu(ex->ee_block)
            && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
                num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
                start = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - num;
                ext_debug("free last %lu blocks starting "E3FSBLK"\n", num, start);
                for (i = 0; i < num; i++) {
                        bh = sb_find_get_block(inode->i_sb, start + i);
                        ext4_forget(handle, 0, inode, bh, start + i);
                ext4_free_blocks(handle, inode, start, num);
        } else if (from == le32_to_cpu(ex->ee_block)
                   && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
                printk("strange request: removal %lu-%lu from %u:%u\n",
                       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
                printk("strange request: removal(2) %lu-%lu from %u:%u\n",
                       from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                 struct ext4_ext_path *path, unsigned long start)
        int err = 0, correct_index = 0;
        int depth = ext_depth(inode), credits;
        struct ext4_extent_header *eh;
        unsigned a, b, block, num;
        unsigned long ex_ee_block;
        unsigned short ex_ee_len;
        struct ext4_extent *ex;

        ext_debug("truncate since %lu in leaf\n", start);
        if (!path[depth].p_hdr)
                path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
        eh = path[depth].p_hdr;

        BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
        BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);

        /* find where to start removing */
        ex = EXT_LAST_EXTENT(eh);

        ex_ee_block = le32_to_cpu(ex->ee_block);
        ex_ee_len = le16_to_cpu(ex->ee_len);

        while (ex >= EXT_FIRST_EXTENT(eh) &&
               ex_ee_block + ex_ee_len > start) {
                ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
                path[depth].p_ext = ex;

                a = ex_ee_block > start ? ex_ee_block : start;
                b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
                        ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

                ext_debug("  border %u:%u\n", a, b);

                if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
                } else if (a != ex_ee_block) {
                        /* remove the tail of the extent */
                        block = ex_ee_block;
                } else if (b != ex_ee_block + ex_ee_len - 1) {
                        /* remove the head of the extent */
                        /* there is no "make a hole" API yet */
                        /* remove the whole extent: excellent! */
                        block = ex_ee_block;
                        BUG_ON(a != ex_ee_block);
                        BUG_ON(b != ex_ee_block + ex_ee_len - 1);

                /* at present, an extent can't cross a block group */
                /* leaf + bitmap + group desc + sb + inode */
                if (ex == EXT_FIRST_EXTENT(eh)) {
                        credits += (ext_depth(inode)) + 1;
                credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);

                handle = ext4_ext_journal_restart(handle, credits);
                if (IS_ERR(handle)) {
                        err = PTR_ERR(handle);

                err = ext4_ext_get_access(handle, inode, path + depth);

                err = ext4_remove_blocks(handle, inode, ex, a, b);

                        /* this extent was removed entirely; mark the slot unused */
                        ext4_ext_store_pblock(ex, 0);
                        eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);

                ex->ee_block = cpu_to_le32(block);
                ex->ee_len = cpu_to_le16(num);

                err = ext4_ext_dirty(handle, inode, path + depth);

                ext_debug("new extent: %u:%u:"E3FSBLK"\n", block, num,
                ex_ee_block = le32_to_cpu(ex->ee_block);
                ex_ee_len = le16_to_cpu(ex->ee_len);

        if (correct_index && eh->eh_entries)
                err = ext4_ext_correct_indexes(handle, inode, path);

        /* if this leaf is free, then we should
         * remove it from the index block above */
        if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
                err = ext4_ext_rm_idx(handle, inode, path + depth);
/*
 * returns 1 if the current index has to be freed (even partially)
 */
ext4_ext_more_to_rm(struct ext4_ext_path *path)
        BUG_ON(path->p_idx == NULL);

        if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))

        /*
         * if truncate on a deeper level happened, and it wasn't partial,
         * we have to consider the current index for truncation
         */
        if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
int ext4_ext_remove_space(struct inode *inode, unsigned long start)
        struct super_block *sb = inode->i_sb;
        int depth = ext_depth(inode);
        struct ext4_ext_path *path;

        ext_debug("truncate since %lu\n", start);

        /* probably the first extent we're going to free will be the last in the block */
        handle = ext4_journal_start(inode, depth + 1);
                return PTR_ERR(handle);

        ext4_ext_invalidate_cache(inode);

        /*
         * we start scanning from the right side, freeing all the blocks
         * after i_size and walking into the depth
         */
        path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
                ext4_journal_stop(handle);
        memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
        path[0].p_hdr = ext_inode_hdr(inode);
        if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
        path[0].p_depth = depth;
        while (i >= 0 && err == 0) {
                        /* this is a leaf block */
                        err = ext4_ext_rm_leaf(handle, inode, path, start);
                        /* the root level has p_bh == NULL; brelse() handles this */
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;

                /* this is an index block */
                if (!path[i].p_hdr) {
                        ext_debug("initialize header\n");
                        path[i].p_hdr = ext_block_hdr(path[i].p_bh);
                        if (ext4_ext_check_header(__FUNCTION__, inode,

                BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
                       > le16_to_cpu(path[i].p_hdr->eh_max));
                BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);

                if (!path[i].p_idx) {
                        /* this level hasn't been touched yet */
                        path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
                        path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries) + 1;
                        ext_debug("init index ptr: hdr 0x%p, num %d\n",
                                  le16_to_cpu(path[i].p_hdr->eh_entries));
                        /* we've been here already; look at the next index */

                ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
                          i, EXT_FIRST_INDEX(path[i].p_hdr),
                if (ext4_ext_more_to_rm(path + i)) {
                        /* go to the next level */
                        ext_debug("move to level %d (block "E3FSBLK")\n",
                                  i + 1, idx_pblock(path[i].p_idx));
                        memset(path + i + 1, 0, sizeof(*path));
                                sb_bread(sb, idx_pblock(path[i].p_idx));
                        if (!path[i+1].p_bh) {
                                /* should we reset i_size? */

                        /* record the actual number of indexes so we can tell
                         * whether this number changes at the next iteration */
                        path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);

                        /* we've finished processing this index; go up */
                        if (path[i].p_hdr->eh_entries == 0 && i > 0) {
                                /* index is empty, remove it;
                                 * the handle must already be prepared by the
                                 * leaf removal above */
                                err = ext4_ext_rm_idx(handle, inode, path + i);
                        /* the root level has p_bh == NULL; brelse() handles this */
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;
                        ext_debug("return to level %d\n", i);

        /* TODO: flexible tree reduction should be here */
        if (path->p_hdr->eh_entries == 0) {
                /*
                 * truncate to zero freed the whole tree,
                 * so we need to correct eh_depth
                 */
                err = ext4_ext_get_access(handle, inode, path);
                        ext_inode_hdr(inode)->eh_depth = 0;
                        ext_inode_hdr(inode)->eh_max =
                                cpu_to_le16(ext4_ext_space_root(inode));
                        err = ext4_ext_dirty(handle, inode, path);

        ext4_ext_tree_changed(inode);
        ext4_ext_drop_refs(path);
        ext4_journal_stop(handle);
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
        /*
         * possible initialization would be here
         */

        if (test_opt(sb, EXTENTS)) {
                printk("EXT4-fs: file extents enabled");
#ifdef AGRESSIVE_TEST
                printk(", agressive tests");
#ifdef CHECK_BINSEARCH
                printk(", check binsearch");
#ifdef EXTENTS_STATS
#ifdef EXTENTS_STATS
                spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
                EXT4_SB(sb)->s_ext_min = 1 << 30;
                EXT4_SB(sb)->s_ext_max = 0;

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
        if (!test_opt(sb, EXTENTS))

#ifdef EXTENTS_STATS
        if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
                struct ext4_sb_info *sbi = EXT4_SB(sb);
                printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
                       sbi->s_ext_blocks, sbi->s_ext_extents,
                       sbi->s_ext_blocks / sbi->s_ext_extents);
                printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
                       sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                        ext4_fsblk_t iblock,
                        unsigned long max_blocks, struct buffer_head *bh_result,
                        int create, int extend_disksize)
        struct ext4_ext_path *path = NULL;
        struct ext4_extent newex, *ex;
        ext4_fsblk_t goal, newblock;
        unsigned long allocated = 0;

        __clear_bit(BH_New, &bh_result->b_state);
        ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
                  max_blocks, (unsigned) inode->i_ino);
        mutex_lock(&EXT4_I(inode)->truncate_mutex);

        /* check in cache */
        if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
                if (goal == EXT4_EXT_CACHE_GAP) {
                                /* the block isn't allocated yet and
                                 * the user doesn't want to allocate it */
                        /* we should allocate the requested block */
                } else if (goal == EXT4_EXT_CACHE_EXTENT) {
                        /* the block is already allocated */
                                       - le32_to_cpu(newex.ee_block)
                                       + ext_pblock(&newex);
                        /* number of remaining blocks in the extent */
                        allocated = le16_to_cpu(newex.ee_len) -
                                        (iblock - le32_to_cpu(newex.ee_block));
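/*
 * Cache-hit arithmetic, illustrated (our example): for a cached extent
 * with ee_block 100, ee_len 10 and physical start 5000, a lookup of
 * iblock 105 yields newblock = 105 - 100 + 5000 = 5005 and
 * allocated = 10 - (105 - 100) = 5 blocks still mapped from here on.
 */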
        /* find extent for this block */
        path = ext4_ext_find_extent(inode, iblock, NULL);
                err = PTR_ERR(path);

        depth = ext_depth(inode);

        /*
         * a consistent leaf must not be empty;
         * this situation is possible, though, _during_ tree modification;
         * this is why the assert can't be put in ext4_ext_find_extent()
         */
        BUG_ON(path[depth].p_ext == NULL && depth != 0);

        if ((ex = path[depth].p_ext)) {
                unsigned long ee_block = le32_to_cpu(ex->ee_block);
                ext4_fsblk_t ee_start = ext_pblock(ex);
                unsigned short ee_len = le16_to_cpu(ex->ee_len);

                /*
                 * Allow future support for preallocated extents to be added
                 * as an RO_COMPAT feature:
                 * Uninitialized extents are treated as holes, except that
                 * we avoid (fail) allocating new blocks during a write.
                 */
                if (ee_len > EXT_MAX_LEN)
                /* if the found extent covers the block, simply return it */
                if (iblock >= ee_block && iblock < ee_block + ee_len) {
                        newblock = iblock - ee_block + ee_start;
                        /* number of remaining blocks in the extent */
                        allocated = ee_len - (iblock - ee_block);
                        ext_debug("%d fit into %lu:%d -> "E3FSBLK"\n", (int) iblock,
                                  ee_block, ee_len, newblock);
                        ext4_ext_put_in_cache(inode, ee_block, ee_len,
                                              ee_start, EXT4_EXT_CACHE_EXTENT);

        /*
         * the requested block isn't allocated yet;
         * we must not create a block if the create flag is zero
         */
                /* put the just-found gap into cache to speed up subsequent requests */
                ext4_ext_put_gap_in_cache(inode, path, iblock);

        /*
         * Okay, we need to do block allocation. Lazily initialize the block
         * allocation info here if necessary
         */
        if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
                ext4_init_block_alloc_info(inode);

        /* allocate new block */
        goal = ext4_ext_find_goal(inode, path, iblock);
        allocated = max_blocks;
        newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
        ext_debug("allocate new block: goal "E3FSBLK", found "E3FSBLK"/%lu\n",
                  goal, newblock, allocated);

        /* try to insert the new extent into the found leaf and return */
        newex.ee_block = cpu_to_le32(iblock);
        ext4_ext_store_pblock(&newex, newblock);
        newex.ee_len = cpu_to_le16(allocated);
        err = ext4_ext_insert_extent(handle, inode, path, &newex);

        if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
                EXT4_I(inode)->i_disksize = inode->i_size;

        /* the previous routine could have used the block we allocated */
        newblock = ext_pblock(&newex);
        __set_bit(BH_New, &bh_result->b_state);

        ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
                              EXT4_EXT_CACHE_EXTENT);

        if (allocated > max_blocks)
                allocated = max_blocks;
        ext4_ext_show_leaf(inode, path);
        __set_bit(BH_Mapped, &bh_result->b_state);
        bh_result->b_bdev = inode->i_sb->s_bdev;
        bh_result->b_blocknr = newblock;

        ext4_ext_drop_refs(path);
        mutex_unlock(&EXT4_I(inode)->truncate_mutex);

        return err ? err : allocated;
void ext4_ext_truncate(struct inode *inode, struct page *page)
        struct address_space *mapping = inode->i_mapping;
        struct super_block *sb = inode->i_sb;
        unsigned long last_block;

        /*
         * probably the first extent we're going to free will be the last in the block
         */
        err = ext4_writepage_trans_blocks(inode) + 3;
        handle = ext4_journal_start(inode, err);
        if (IS_ERR(handle)) {
                clear_highpage(page);
                flush_dcache_page(page);
                page_cache_release(page);

        ext4_block_truncate_page(handle, page, mapping, inode->i_size);

        mutex_lock(&EXT4_I(inode)->truncate_mutex);
        ext4_ext_invalidate_cache(inode);

        /*
         * TODO: optimization is possible here;
         * probably we don't need any scanning at all,
         * because page truncation is enough
         */
        if (ext4_orphan_add(handle, inode))

        /* we have to know where to truncate from in the crash case */
        EXT4_I(inode)->i_disksize = inode->i_size;
        ext4_mark_inode_dirty(handle, inode);

        last_block = (inode->i_size + sb->s_blocksize - 1)
                        >> EXT4_BLOCK_SIZE_BITS(sb);
        err = ext4_ext_remove_space(inode, last_block);

        /* In a multi-transaction truncate, we only make the final
         * transaction synchronous */

        /*
         * If this was a simple ftruncate(), and the file will remain alive
         * then we need to clear up the orphan record which we created above.
         * However, if this was a real unlink then we were called by
         * ext4_delete_inode(), and we allow that function to clean up the
         * orphan info for us.
         */
        ext4_orphan_del(handle, inode);

        mutex_unlock(&EXT4_I(inode)->truncate_mutex);
        ext4_journal_stop(handle);
/*
 * this routine calculates the max number of blocks we could modify
 * in order to allocate a new block for an inode
 */
int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
        needed = ext4_ext_calc_credits_for_insert(inode, NULL);

        /* the caller wants to allocate num blocks, but note it includes sb */
        needed = needed * num - (num - 1);
        needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
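/*
 * Why "needed * num - (num - 1)" (our annotation): each per-insert
 * estimate already counts the superblock once, but num inserts in one
 * transaction share a single superblock credit, so num - 1 duplicates
 * are subtracted.  E.g. with a per-insert estimate of 21 and num == 3,
 * needed = 21 * 3 - 2 = 61 before the quota credits are added.
 */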
EXPORT_SYMBOL(ext4_mark_inode_dirty);
EXPORT_SYMBOL(ext4_ext_invalidate_cache);
EXPORT_SYMBOL(ext4_ext_insert_extent);
EXPORT_SYMBOL(ext4_ext_walk_space);
EXPORT_SYMBOL(ext4_ext_find_goal);
EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);