fs/ext4/extents.c
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"


/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
        ext4_fsblk_t block;

        block = le32_to_cpu(ex->ee_start_lo);
        block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
        return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
        ext4_fsblk_t block;

        block = le32_to_cpu(ix->ei_leaf_lo);
        block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
        return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
        ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
        ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
        ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
        ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

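/*
 * ext4_ext_journal_restart:
 * make sure the handle has at least @needed credits: try to extend
 * the current transaction and, if it cannot be extended in place,
 * restart it with the requested number of credits.
 */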
static int ext4_ext_journal_restart(handle_t *handle, int needed)
{
        int err;

        if (!ext4_handle_valid(handle))
                return 0;
        if (handle->h_buffer_credits > needed)
                return 0;
        err = ext4_journal_extend(handle, needed);
        if (err <= 0)
                return err;
        return ext4_journal_restart(handle, needed);
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        if (path->p_bh) {
                /* path points to block */
                return ext4_journal_get_write_access(handle, path->p_bh);
        }
        /* path points to leaf/index in inode body */
        /* we use in-core data, no need to protect them */
        return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        int err;
        if (path->p_bh) {
                /* path points to block */
                err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
        } else {
                /* path points to leaf/index in inode body */
                err = ext4_mark_inode_dirty(handle, inode);
        }
        return err;
}

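/*
 * ext4_ext_find_goal:
 * pick a goal physical block for a new allocation at logical block
 * @block: prefer a block right after the extent found in @path, or
 * the index block itself, and otherwise fall back to a "coloured"
 * offset within the inode's block group.
 */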
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
                              struct ext4_ext_path *path,
                              ext4_lblk_t block)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
        int depth;

        if (path) {
                struct ext4_extent *ex;
                depth = path->p_depth;

                /* try to predict block placement */
                ex = path[depth].p_ext;
                if (ex)
                        return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

                /* it looks like index is empty;
                 * try to find starting block from index itself */
                if (path[depth].p_bh)
                        return path[depth].p_bh->b_blocknr;
        }

        /* OK. use inode's group */
        bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
                le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        else
                colour = (current->pid % 16) * ((last_block - bg_start) / 16);
        return bg_start + colour + block;
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
                        struct ext4_extent *ex, int *err)
{
        ext4_fsblk_t goal, newblock;

        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
        newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
        return newblock;
}

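/*
 * The ext4_ext_space_*() helpers below return how many extent or index
 * entries fit into a full tree block or into the root, which lives in
 * the inode's i_data; AGGRESSIVE_TEST shrinks the limits so deep trees
 * can be exercised on small files.
 */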
static int ext4_ext_space_block(struct inode *inode)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (size > 6)
                size = 6;
#endif
        return size;
}

static int ext4_ext_space_block_idx(struct inode *inode)
{
        int size;

        size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
                        / sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (size > 5)
                size = 5;
#endif
        return size;
}

static int ext4_ext_space_root(struct inode *inode)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
        if (size > 3)
                size = 3;
#endif
        return size;
}

static int ext4_ext_space_root_idx(struct inode *inode)
{
        int size;

        size = sizeof(EXT4_I(inode)->i_data);
        size -= sizeof(struct ext4_extent_header);
        size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
        if (size > 4)
                size = 4;
#endif
        return size;
}

/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks.
 * Worst case is one block per extent.
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
{
        int lcap, icap, rcap, leafs, idxs, num;
        int newextents = blocks;

        rcap = ext4_ext_space_root_idx(inode);
        lcap = ext4_ext_space_block(inode);
        icap = ext4_ext_space_block_idx(inode);

        /* number of new leaf blocks needed */
        num = leafs = (newextents + lcap - 1) / lcap;

        /*
         * Worst case, we need separate index block(s)
         * to link all new leaf blocks
         */
        idxs = (leafs + icap - 1) / icap;
        do {
                num += idxs;
                idxs = (idxs + icap - 1) / icap;
        } while (idxs > rcap);

        return num;
}

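/*
 * ext4_ext_max_entries:
 * return the maximum number of entries allowed in a node at the given
 * @depth, distinguishing the in-inode root from on-disk tree blocks.
 */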
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
        int max;

        if (depth == ext_depth(inode)) {
                if (depth == 0)
                        max = ext4_ext_space_root(inode);
                else
                        max = ext4_ext_space_root_idx(inode);
        } else {
                if (depth == 0)
                        max = ext4_ext_space_block(inode);
                else
                        max = ext4_ext_space_block_idx(inode);
        }

        return max;
}

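/*
 * __ext4_ext_check_header:
 * sanity-check an extent header (magic, depth, entry counts) before it
 * is trusted; on corruption report via ext4_error() and return -EIO.
 */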
static int __ext4_ext_check_header(const char *function, struct inode *inode,
                                        struct ext4_extent_header *eh,
                                        int depth)
{
        const char *error_msg;
        int max = 0;

        if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
                error_msg = "invalid magic";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
                error_msg = "unexpected eh_depth";
                goto corrupted;
        }
        if (unlikely(eh->eh_max == 0)) {
                error_msg = "invalid eh_max";
                goto corrupted;
        }
        max = ext4_ext_max_entries(inode, depth);
        if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
                error_msg = "too large eh_max";
                goto corrupted;
        }
        if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
                error_msg = "invalid eh_entries";
                goto corrupted;
        }
        return 0;

corrupted:
        ext4_error(inode->i_sb, function,
                        "bad header in inode #%lu: %s - magic %x, "
                        "entries %u, max %u(%u), depth %u(%u)",
                        inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
                        le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
                        max, le16_to_cpu(eh->eh_depth), depth);

        return -EIO;
}

#define ext4_ext_check_header(inode, eh, depth) \
        __ext4_ext_check_header(__func__, inode, eh, depth)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
        int k, l = path->p_depth;

        ext_debug("path:");
        for (k = 0; k <= l; k++, path++) {
                if (path->p_idx) {
                        ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
                                  idx_pblock(path->p_idx));
                } else if (path->p_ext) {
                        ext_debug("  %d:%d:%llu ",
                                  le32_to_cpu(path->p_ext->ee_block),
                                  ext4_ext_get_actual_len(path->p_ext),
                                  ext_pblock(path->p_ext));
                } else
                        ext_debug("  []");
        }
        ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
        int depth = ext_depth(inode);
        struct ext4_extent_header *eh;
        struct ext4_extent *ex;
        int i;

        if (!path)
                return;

        eh = path[depth].p_hdr;
        ex = EXT_FIRST_EXTENT(eh);

        for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
                ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
                          ext4_ext_get_actual_len(ex), ext_pblock(ex));
        }
        ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif

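/*
 * ext4_ext_drop_refs:
 * release the buffer_head references held at each level of @path.
 */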
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
        int depth = path->p_depth;
        int i;

        for (i = 0; i <= depth; i++, path++)
                if (path->p_bh) {
                        brelse(path->p_bh);
                        path->p_bh = NULL;
                }
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
                        struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent_idx *r, *l, *m;


        ext_debug("binsearch for %u(idx):  ", block);

        l = EXT_FIRST_INDEX(eh) + 1;
        r = EXT_LAST_INDEX(eh);
        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ei_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
                                m, le32_to_cpu(m->ei_block),
                                r, le32_to_cpu(r->ei_block));
        }

        path->p_idx = l - 1;
        ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
                  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent_idx *chix, *ix;
                int k;

                chix = ix = EXT_FIRST_INDEX(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
                        if (k != 0 &&
                            le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
                                printk(KERN_DEBUG "k=%d, ix=0x%p, "
                                       "first=0x%p\n", k,
                                       ix, EXT_FIRST_INDEX(eh));
                                printk(KERN_DEBUG "%u <= %u\n",
                                       le32_to_cpu(ix->ei_block),
                                       le32_to_cpu(ix[-1].ei_block));
                        }
                        BUG_ON(k && le32_to_cpu(ix->ei_block)
                                           <= le32_to_cpu(ix[-1].ei_block));
                        if (block < le32_to_cpu(ix->ei_block))
                                break;
                        chix = ix;
                }
                BUG_ON(chix != path->p_idx);
        }
#endif

}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
                struct ext4_ext_path *path, ext4_lblk_t block)
{
        struct ext4_extent_header *eh = path->p_hdr;
        struct ext4_extent *r, *l, *m;

        if (eh->eh_entries == 0) {
                /*
                 * this leaf is empty:
                 * we get such a leaf in split/add case
                 */
                return;
        }

        ext_debug("binsearch for %u:  ", block);

        l = EXT_FIRST_EXTENT(eh) + 1;
        r = EXT_LAST_EXTENT(eh);

        while (l <= r) {
                m = l + (r - l) / 2;
                if (block < le32_to_cpu(m->ee_block))
                        r = m - 1;
                else
                        l = m + 1;
                ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
                                m, le32_to_cpu(m->ee_block),
                                r, le32_to_cpu(r->ee_block));
        }

        path->p_ext = l - 1;
        ext_debug("  -> %d:%llu:%d ",
                        le32_to_cpu(path->p_ext->ee_block),
                        ext_pblock(path->p_ext),
                        ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
        {
                struct ext4_extent *chex, *ex;
                int k;

                chex = ex = EXT_FIRST_EXTENT(eh);
                for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
                        BUG_ON(k && le32_to_cpu(ex->ee_block)
                                          <= le32_to_cpu(ex[-1].ee_block));
                        if (block < le32_to_cpu(ex->ee_block))
                                break;
                        chex = ex;
                }
                BUG_ON(chex != path->p_ext);
        }
#endif

}

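/*
 * ext4_ext_tree_init:
 * initialize an empty extent tree rooted in the inode body.
 */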
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
        struct ext4_extent_header *eh;

        eh = ext_inode_hdr(inode);
        eh->eh_depth = 0;
        eh->eh_entries = 0;
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
        ext4_mark_inode_dirty(handle, inode);
        ext4_ext_invalidate_cache(inode);
        return 0;
}

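/*
 * ext4_ext_find_extent:
 * walk the tree from the root down to the leaf that should contain
 * @block, recording one ext4_ext_path entry per level; a path array is
 * allocated here if the caller did not pass one in.  The caller must
 * eventually call ext4_ext_drop_refs() and free the path.
 */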
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                                        struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;

        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);
        if (ext4_ext_check_header(inode, eh, depth))
                return ERR_PTR(-EIO);


        /* account possible depth increase */
        if (!path) {
                path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
                                GFP_NOFS);
                if (!path)
                        return ERR_PTR(-ENOMEM);
                alloc = 1;
        }
        path[0].p_hdr = eh;
        path[0].p_bh = NULL;

        i = depth;
        /* walk through the tree */
        while (i) {
                ext_debug("depth %d: num %d, max %d\n",
                          ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

                ext4_ext_binsearch_idx(inode, path + ppos, block);
                path[ppos].p_block = idx_pblock(path[ppos].p_idx);
                path[ppos].p_depth = i;
                path[ppos].p_ext = NULL;

                bh = sb_bread(inode->i_sb, path[ppos].p_block);
                if (!bh)
                        goto err;

                eh = ext_block_hdr(bh);
                ppos++;
                BUG_ON(ppos > depth);
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;

                if (ext4_ext_check_header(inode, eh, i))
                        goto err;
        }

        path[ppos].p_depth = i;
        path[ppos].p_ext = NULL;
        path[ppos].p_idx = NULL;

        /* find extent */
        ext4_ext_binsearch(inode, path + ppos, block);
        /* if not an empty leaf */
        if (path[ppos].p_ext)
                path[ppos].p_block = ext_pblock(path[ppos].p_ext);

        ext4_ext_show_path(inode, path);

        return path;

err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
        return ERR_PTR(-EIO);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *curp,
                                int logical, ext4_fsblk_t ptr)
{
        struct ext4_extent_idx *ix;
        int len, err;

        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                return err;

        BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
        len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
        if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
                /* insert after */
                if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
                        len = (len - 1) * sizeof(struct ext4_extent_idx);
                        len = len < 0 ? 0 : len;
                        ext_debug("insert new index %d after: %llu. "
                                        "move %d from 0x%p to 0x%p\n",
                                        logical, ptr, len,
                                        (curp->p_idx + 1), (curp->p_idx + 2));
                        memmove(curp->p_idx + 2, curp->p_idx + 1, len);
                }
                ix = curp->p_idx + 1;
        } else {
                /* insert before */
                len = len * sizeof(struct ext4_extent_idx);
                len = len < 0 ? 0 : len;
                ext_debug("insert new index %d before: %llu. "
                                "move %d from 0x%p to 0x%p\n",
                                logical, ptr, len,
                                curp->p_idx, (curp->p_idx + 1));
                memmove(curp->p_idx + 1, curp->p_idx, len);
                ix = curp->p_idx;
        }

        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
        le16_add_cpu(&curp->p_hdr->eh_entries, 1);

        BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
                             > le16_to_cpu(curp->p_hdr->eh_max));
        BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

        err = ext4_ext_dirty(handle, inode, curp);
        ext4_std_error(inode->i_sb, err);

        return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path,
                                struct ext4_extent *newext, int at)
{
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        struct ext4_extent *ex;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
        ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
        int err = 0;

        /* make decision: where to split? */
        /* FIXME: now decision is simplest: at current extent */

        /* if current leaf will be split, then we should use
         * border from split point */
        BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
        if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
                border = path[depth].p_ext[1].ee_block;
                ext_debug("leaf will be split."
                                " next leaf starts at %d\n",
                                  le32_to_cpu(border));
        } else {
                border = newext->ee_block;
                ext_debug("leaf will be added."
                                " next leaf starts at %d\n",
                                le32_to_cpu(border));
        }

        /*
         * If error occurs, then we break processing
         * and mark filesystem read-only. index won't
         * be inserted and tree will be in consistent
         * state. Next mount will repair buffers too.
         */

        /*
         * Get array to track all allocated blocks.
         * We need this to handle errors and free blocks
         * upon them.
         */
        ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
        if (!ablocks)
                return -ENOMEM;

        /* allocate all needed blocks */
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
                                                   newext, &err);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
        }

        /* initialize new leaf */
        newblock = ablocks[--a];
        BUG_ON(newblock == 0);
        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                goto cleanup;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err)
                goto cleanup;

        neh = ext_block_hdr(bh);
        neh->eh_entries = 0;
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;
        ex = EXT_FIRST_EXTENT(neh);

        /* move remainder of path[depth] to the new leaf */
        BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
        /* start copy from next extent */
        /* TODO: we could do it by single memmove */
        m = 0;
        path[depth].p_ext++;
        while (path[depth].p_ext <=
                        EXT_MAX_EXTENT(path[depth].p_hdr)) {
                ext_debug("move %d:%llu:%d in new leaf %llu\n",
                                le32_to_cpu(path[depth].p_ext->ee_block),
                                ext_pblock(path[depth].p_ext),
                                ext4_ext_get_actual_len(path[depth].p_ext),
                                newblock);
                /*memmove(ex++, path[depth].p_ext++,
                                sizeof(struct ext4_extent));
                neh->eh_entries++;*/
                path[depth].p_ext++;
                m++;
        }
        if (m) {
                memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
                le16_add_cpu(&neh->eh_entries, m);
        }

        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto cleanup;
        brelse(bh);
        bh = NULL;

        /* correct old leaf */
        if (m) {
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
                le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;

        }

        /* create intermediate indexes */
        k = depth - at - 1;
        BUG_ON(k < 0);
        if (k)
                ext_debug("create %d intermediate indices\n", k);
        /* insert new index into current index block */
        /* current depth stored in i var */
        i = depth - 1;
        while (k--) {
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
                if (!bh) {
                        err = -EIO;
                        goto cleanup;
                }
                lock_buffer(bh);

                err = ext4_journal_get_create_access(handle, bh);
                if (err)
                        goto cleanup;

                neh = ext_block_hdr(bh);
                neh->eh_entries = cpu_to_le16(1);
                neh->eh_magic = EXT4_EXT_MAGIC;
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
                neh->eh_depth = cpu_to_le16(depth - i);
                fidx = EXT_FIRST_INDEX(neh);
                fidx->ei_block = border;
                ext4_idx_store_pblock(fidx, oldblock);

                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);
                /* copy indexes */
                m = 0;
                path[i].p_idx++;

                ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
                                EXT_MAX_INDEX(path[i].p_hdr));
                BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
                                EXT_LAST_INDEX(path[i].p_hdr));
                while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
                        ext_debug("%d: move %d:%llu in new index %llu\n", i,
                                        le32_to_cpu(path[i].p_idx->ei_block),
                                        idx_pblock(path[i].p_idx),
                                        newblock);
                        /*memmove(++fidx, path[i].p_idx++,
                                        sizeof(struct ext4_extent_idx));
                        neh->eh_entries++;
                        BUG_ON(neh->eh_entries > neh->eh_max);*/
                        path[i].p_idx++;
                        m++;
                }
                if (m) {
                        memmove(++fidx, path[i].p_idx - m,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (err)
                        goto cleanup;
                brelse(bh);
                bh = NULL;

                /* correct old index */
                if (m) {
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                        le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
                }

                i--;
        }

        /* insert new index */
        err = ext4_ext_insert_index(handle, inode, path + at,
                                    le32_to_cpu(border), newblock);

cleanup:
        if (bh) {
                if (buffer_locked(bh))
                        unlock_buffer(bh);
                brelse(bh);
        }

        if (err) {
                /* free all allocated blocks in error case */
                for (i = 0; i < depth; i++) {
                        if (!ablocks[i])
                                continue;
                        ext4_free_blocks(handle, inode, ablocks[i], 1, 1);
                }
        }
        kfree(ablocks);

        return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                                        struct ext4_ext_path *path,
                                        struct ext4_extent *newext)
{
        struct ext4_ext_path *curp = path;
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
        struct buffer_head *bh;
        ext4_fsblk_t newblock;
        int err = 0;

        newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
        if (newblock == 0)
                return err;

        bh = sb_getblk(inode->i_sb, newblock);
        if (!bh) {
                err = -EIO;
                ext4_std_error(inode->i_sb, err);
                return err;
        }
        lock_buffer(bh);

        err = ext4_journal_get_create_access(handle, bh);
        if (err) {
                unlock_buffer(bh);
                goto out;
        }

        /* move top-level index/leaf into new block */
        memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

        /* set size of new block */
        neh = ext_block_hdr(bh);
        /* old root could have indexes or leaves
         * so calculate e_max right way */
        if (ext_depth(inode))
                neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
        else
                neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
        neh->eh_magic = EXT4_EXT_MAGIC;
        set_buffer_uptodate(bh);
        unlock_buffer(bh);

        err = ext4_handle_dirty_metadata(handle, inode, bh);
        if (err)
                goto out;

        /* create index in new top-level index: num,max,pointer */
        err = ext4_ext_get_access(handle, inode, curp);
        if (err)
                goto out;

        curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
        curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
        curp->p_hdr->eh_entries = cpu_to_le16(1);
        curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

        if (path[0].p_hdr->eh_depth)
                curp->p_idx->ei_block =
                        EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
        else
                curp->p_idx->ei_block =
                        EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
        ext4_idx_store_pblock(curp->p_idx, newblock);

        neh = ext_inode_hdr(inode);
        fidx = EXT_FIRST_INDEX(neh);
        ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
                  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
                  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

        neh->eh_depth = cpu_to_le16(path->p_depth + 1);
        err = ext4_ext_dirty(handle, inode, curp);
out:
        brelse(bh);

        return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
                                        struct ext4_ext_path *path,
                                        struct ext4_extent *newext)
{
        struct ext4_ext_path *curp;
        int depth, i, err = 0;

repeat:
        i = depth = ext_depth(inode);

        /* walk up to the tree and look for free index entry */
        curp = path + depth;
        while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
                i--;
                curp--;
        }

        /* we use already allocated block for index block,
         * so subsequent data blocks should be contiguous */
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found index with free entry, then use that
                 * entry: create all needed subtree and add new leaf */
                err = ext4_ext_split(handle, inode, path, newext, i);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path))
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
                err = ext4_ext_grow_indepth(handle, inode, path, newext);
                if (err)
                        goto out;

                /* refill path */
                ext4_ext_drop_refs(path);
                path = ext4_ext_find_extent(inode,
                                   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
                                    path);
                if (IS_ERR(path)) {
                        err = PTR_ERR(path);
                        goto out;
                }

                /*
                 * only first (depth 0 -> 1) produces free space;
                 * in all other cases we have to split the grown tree
                 */
                depth = ext_depth(inode);
                if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
                        /* now we need to split */
                        goto repeat;
                }
        }

out:
        return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
                        ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        int depth, ee_len;

        BUG_ON(path == NULL);
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually extent in the path covers blocks smaller
         * than *logical, but it can be that extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
                }
                return 0;
        }

        BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

        *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
        *phys = ext_pblock(ex) + ee_len - 1;
        return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
                        ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
        struct buffer_head *bh = NULL;
        struct ext4_extent_header *eh;
        struct ext4_extent_idx *ix;
        struct ext4_extent *ex;
        ext4_fsblk_t block;
        int depth;      /* Note, NOT eh_depth; depth from top of tree */
        int ee_len;

        BUG_ON(path == NULL);
        depth = path->p_depth;
        *phys = 0;

        if (depth == 0 && path->p_ext == NULL)
                return 0;

        /* usually extent in the path covers blocks smaller
         * than *logical, but it can be that extent is the
         * first one in the file */

        ex = path[depth].p_ext;
        ee_len = ext4_ext_get_actual_len(ex);
        if (*logical < le32_to_cpu(ex->ee_block)) {
                BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
                while (--depth >= 0) {
                        ix = path[depth].p_idx;
                        BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
                }
                *logical = le32_to_cpu(ex->ee_block);
                *phys = ext_pblock(ex);
                return 0;
        }

        BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

        if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
                /* next allocated block in this leaf */
                ex++;
                *logical = le32_to_cpu(ex->ee_block);
                *phys = ext_pblock(ex);
                return 0;
        }

        /* go up and search for index to the right */
        while (--depth >= 0) {
                ix = path[depth].p_idx;
                if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
                        goto got_index;
        }

        /* we've gone up to the root and found no index to the right */
        return 0;

got_index:
        /* we've found index to the right, let's
         * follow it and find the closest allocated
         * block to the right */
        ix++;
        block = idx_pblock(ix);
        while (++depth < path->p_depth) {
                bh = sb_bread(inode->i_sb, block);
                if (bh == NULL)
                        return -EIO;
                eh = ext_block_hdr(bh);
                /* subtract from p_depth to get proper eh_depth */
                if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) {
                        put_bh(bh);
                        return -EIO;
                }
                ix = EXT_FIRST_INDEX(eh);
                block = idx_pblock(ix);
                put_bh(bh);
        }

        bh = sb_bread(inode->i_sb, block);
        if (bh == NULL)
                return -EIO;
        eh = ext_block_hdr(bh);
        if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) {
                put_bh(bh);
                return -EIO;
        }
        ex = EXT_FIRST_EXTENT(eh);
        *logical = le32_to_cpu(ex->ee_block);
        *phys = ext_pblock(ex);
        put_bh(bh);
        return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        if (depth == 0 && path->p_ext == NULL)
                return EXT_MAX_BLOCK;

        while (depth >= 0) {
                if (depth == path->p_depth) {
                        /* leaf */
                        if (path[depth].p_ext !=
                                        EXT_LAST_EXTENT(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_ext[1].ee_block);
                } else {
                        /* index */
                        if (path[depth].p_idx !=
                                        EXT_LAST_INDEX(path[depth].p_hdr))
                                return le32_to_cpu(path[depth].p_idx[1].ei_block);
                }
                depth--;
        }

        return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
                                        struct ext4_ext_path *path)
{
        int depth;

        BUG_ON(path == NULL);
        depth = path->p_depth;

        /* zero-tree has no leaf blocks at all */
        if (depth == 0)
                return EXT_MAX_BLOCK;

        /* go to index block */
        depth--;

        while (depth >= 0) {
                if (path[depth].p_idx !=
                                EXT_LAST_INDEX(path[depth].p_hdr))
                        return (ext4_lblk_t)
                                le32_to_cpu(path[depth].p_idx[1].ei_block);
                depth--;
        }

        return EXT_MAX_BLOCK;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
{
        struct ext4_extent_header *eh;
        int depth = ext_depth(inode);
        struct ext4_extent *ex;
        __le32 border;
        int k, err = 0;

        eh = path[depth].p_hdr;
        ex = path[depth].p_ext;
        BUG_ON(ex == NULL);
        BUG_ON(eh == NULL);

        if (depth == 0) {
                /* there is no tree at all */
                return 0;
        }

        if (ex != EXT_FIRST_EXTENT(eh)) {
                /* we correct tree if first leaf got modified only */
                return 0;
        }

        /*
         * TODO: we need correction if border is smaller than current one
         */
        k = depth - 1;
        border = path[depth].p_ext->ee_block;
        err = ext4_ext_get_access(handle, inode, path + k);
        if (err)
                return err;
        path[k].p_idx->ei_block = border;
        err = ext4_ext_dirty(handle, inode, path + k);
        if (err)
                return err;

        while (k--) {
                /* change all left-side indexes */
                if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
                        break;
                err = ext4_ext_get_access(handle, inode, path + k);
                if (err)
                        break;
                path[k].p_idx->ei_block = border;
                err = ext4_ext_dirty(handle, inode, path + k);
                if (err)
                        break;
        }

        return err;
}

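/*
 * ext4_can_extents_be_merged:
 * return 1 if @ex1 and @ex2 are logically and physically contiguous,
 * have matching (un)initialized state and their combined length stays
 * within the on-disk limit, so they may be merged; 0 otherwise.
 */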
static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
                                struct ext4_extent *ex2)
{
        unsigned short ext1_ee_len, ext2_ee_len, max_len;

        /*
         * Make sure that either both extents are uninitialized, or
         * both are _not_.
         */
        if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
                return 0;

        if (ext4_ext_is_uninitialized(ex1))
                max_len = EXT_UNINIT_MAX_LEN;
        else
                max_len = EXT_INIT_MAX_LEN;

        ext1_ee_len = ext4_ext_get_actual_len(ex1);
        ext2_ee_len = ext4_ext_get_actual_len(ex2);

        if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
                        le32_to_cpu(ex2->ee_block))
                return 0;

        /*
         * To allow future support for preallocated extents to be added
         * as an RO_COMPAT feature, refuse to merge two extents if
         * this can result in the top bit of ee_len being set.
         */
        if (ext1_ee_len + ext2_ee_len > max_len)
                return 0;
#ifdef AGGRESSIVE_TEST
        if (ext1_ee_len >= 4)
                return 0;
#endif

        if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
                return 1;
        return 0;
}

/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
                          struct ext4_ext_path *path,
                          struct ext4_extent *ex)
{
        struct ext4_extent_header *eh;
        unsigned int depth, len;
        int merge_done = 0;
        int uninitialized = 0;

        depth = ext_depth(inode);
        BUG_ON(path[depth].p_hdr == NULL);
        eh = path[depth].p_hdr;

        while (ex < EXT_LAST_EXTENT(eh)) {
                if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
                        break;
                /* merge with next extent! */
                if (ext4_ext_is_uninitialized(ex))
                        uninitialized = 1;
                ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
                                + ext4_ext_get_actual_len(ex + 1));
                if (uninitialized)
                        ext4_ext_mark_uninitialized(ex);

                if (ex + 1 < EXT_LAST_EXTENT(eh)) {
                        len = (EXT_LAST_EXTENT(eh) - ex - 1)
                                * sizeof(struct ext4_extent);
                        memmove(ex + 1, ex + 2, len);
                }
                le16_add_cpu(&eh->eh_entries, -1);
                merge_done = 1;
                WARN_ON(eh->eh_entries == 0);
                if (!eh->eh_entries)
                        ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
                           "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
        }

        return merge_done;
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
                                    struct ext4_extent *newext,
                                    struct ext4_ext_path *path)
{
        ext4_lblk_t b1, b2;
        unsigned int depth, len1;
        unsigned int ret = 0;

        b1 = le32_to_cpu(newext->ee_block);
        len1 = ext4_ext_get_actual_len(newext);
        depth = ext_depth(inode);
        if (!path[depth].p_ext)
                goto out;
        b2 = le32_to_cpu(path[depth].p_ext->ee_block);

        /*
         * get the next allocated block if the extent in the path
         * is before the requested block(s)
         */
        if (b2 < b1) {
                b2 = ext4_ext_next_allocated_block(path);
                if (b2 == EXT_MAX_BLOCK)
                        goto out;
        }

        /* check for wrap through zero on extent logical start block*/
        if (b1 + len1 < b1) {
                len1 = EXT_MAX_BLOCK - b1;
                newext->ee_len = cpu_to_le16(len1);
                ret = 1;
        }

        /* check for overlap */
        if (b1 + len1 > b2) {
                newext->ee_len = cpu_to_le16(b2 - b1);
                ret = 1;
        }
out:
        return ret;
}

1471 /*
1472  * ext4_ext_insert_extent:
1473  * tries to merge requsted extent into the existing extent or
1474  * inserts requested extent as new one into the tree,
1475  * creating new leaf in the no-space case.
1476  */
1477 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1478                                 struct ext4_ext_path *path,
1479                                 struct ext4_extent *newext)
1480 {
1481         struct ext4_extent_header *eh;
1482         struct ext4_extent *ex, *fex;
1483         struct ext4_extent *nearex; /* nearest extent */
1484         struct ext4_ext_path *npath = NULL;
1485         int depth, len, err;
1486         ext4_lblk_t next;
1487         unsigned uninitialized = 0;
1488
1489         BUG_ON(ext4_ext_get_actual_len(newext) == 0);
1490         depth = ext_depth(inode);
1491         ex = path[depth].p_ext;
1492         BUG_ON(path[depth].p_hdr == NULL);
1493
1494         /* try to insert block into found extent and return */
1495         if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
1496                 ext_debug("append %d block to %d:%d (from %llu)\n",
1497                                 ext4_ext_get_actual_len(newext),
1498                                 le32_to_cpu(ex->ee_block),
1499                                 ext4_ext_get_actual_len(ex), ext_pblock(ex));
1500                 err = ext4_ext_get_access(handle, inode, path + depth);
1501                 if (err)
1502                         return err;
1503
1504                 /*
1505                  * ext4_can_extents_be_merged should have checked that either
1506                  * both extents are uninitialized, or both aren't. Thus we
1507                  * need to check only one of them here.
1508                  */
1509                 if (ext4_ext_is_uninitialized(ex))
1510                         uninitialized = 1;
1511                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1512                                         + ext4_ext_get_actual_len(newext));
1513                 if (uninitialized)
1514                         ext4_ext_mark_uninitialized(ex);
1515                 eh = path[depth].p_hdr;
1516                 nearex = ex;
1517                 goto merge;
1518         }
1519
1520 repeat:
1521         depth = ext_depth(inode);
1522         eh = path[depth].p_hdr;
1523         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1524                 goto has_space;
1525
1526         /* probably next leaf has space for us? */
1527         fex = EXT_LAST_EXTENT(eh);
1528         next = ext4_ext_next_leaf_block(inode, path);
1529         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
1530             && next != EXT_MAX_BLOCK) {
1531                 ext_debug("next leaf block - %d\n", next);
1532                 BUG_ON(npath != NULL);
1533                 npath = ext4_ext_find_extent(inode, next, NULL);
1534                 if (IS_ERR(npath))
1535                         return PTR_ERR(npath);
1536                 BUG_ON(npath->p_depth != path->p_depth);
1537                 eh = npath[depth].p_hdr;
1538                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
1539                         ext_debug("next leaf isnt full(%d)\n",
1540                                   le16_to_cpu(eh->eh_entries));
1541                         path = npath;
1542                         goto repeat;
1543                 }
1544                 ext_debug("next leaf has no free space(%d,%d)\n",
1545                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
1546         }
1547
1548         /*
1549          * There is no free space in the found leaf.
1550          * We're gonna add a new leaf in the tree.
1551          */
1552         err = ext4_ext_create_new_leaf(handle, inode, path, newext);
1553         if (err)
1554                 goto cleanup;
1555         depth = ext_depth(inode);
1556         eh = path[depth].p_hdr;
1557
1558 has_space:
1559         nearex = path[depth].p_ext;
1560
1561         err = ext4_ext_get_access(handle, inode, path + depth);
1562         if (err)
1563                 goto cleanup;
1564
1565         if (!nearex) {
1566                 /* there is no extent in this leaf, create first one */
1567                 ext_debug("first extent in the leaf: %d:%llu:%d\n",
1568                                 le32_to_cpu(newext->ee_block),
1569                                 ext_pblock(newext),
1570                                 ext4_ext_get_actual_len(newext));
1571                 path[depth].p_ext = EXT_FIRST_EXTENT(eh);
1572         } else if (le32_to_cpu(newext->ee_block)
1573                            > le32_to_cpu(nearex->ee_block)) {
1574 /*              BUG_ON(newext->ee_block == nearex->ee_block); */
1575                 if (nearex != EXT_LAST_EXTENT(eh)) {
1576                         len = EXT_MAX_EXTENT(eh) - nearex;
1577                         len = (len - 1) * sizeof(struct ext4_extent);
1578                         len = len < 0 ? 0 : len;
1579                         ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
1580                                         "move %d from 0x%p to 0x%p\n",
1581                                         le32_to_cpu(newext->ee_block),
1582                                         ext_pblock(newext),
1583                                         ext4_ext_get_actual_len(newext),
1584                                         nearex, len, nearex + 1, nearex + 2);
1585                         memmove(nearex + 2, nearex + 1, len);
1586                 }
1587                 path[depth].p_ext = nearex + 1;
1588         } else {
1589                 BUG_ON(newext->ee_block == nearex->ee_block);
1590                 len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
1591                 len = len < 0 ? 0 : len;
1592                 ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
1593                                 "move %d from 0x%p to 0x%p\n",
1594                                 le32_to_cpu(newext->ee_block),
1595                                 ext_pblock(newext),
1596                                 ext4_ext_get_actual_len(newext),
1597                                 nearex, len, nearex + 1, nearex + 2);
1598                 memmove(nearex + 1, nearex, len);
1599                 path[depth].p_ext = nearex;
1600         }
1601
1602         le16_add_cpu(&eh->eh_entries, 1);
1603         nearex = path[depth].p_ext;
1604         nearex->ee_block = newext->ee_block;
1605         ext4_ext_store_pblock(nearex, ext_pblock(newext));
1606         nearex->ee_len = newext->ee_len;
1607
1608 merge:
1609         /* try to merge extents to the right */
1610         ext4_ext_try_to_merge(inode, path, nearex);
1611
1612         /* try to merge extents to the left */
1613
1614         /* time to correct all indexes above */
1615         err = ext4_ext_correct_indexes(handle, inode, path);
1616         if (err)
1617                 goto cleanup;
1618
1619         err = ext4_ext_dirty(handle, inode, path + depth);
1620
1621 cleanup:
1622         if (npath) {
1623                 ext4_ext_drop_refs(npath);
1624                 kfree(npath);
1625         }
1626         ext4_ext_invalidate_cache(inode);
1627         return err;
1628 }
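/*
 * A small illustration of the "insert after nearex" path above, using a
 * hypothetical leaf: with extents starting at logical blocks 0, 100 and
 * 300 and a new extent starting at 200, nearex points at the extent at
 * 100, the memmove() shifts the extent at 300 one slot to the right,
 * the new extent is copied into the vacated slot and eh_entries grows
 * from 3 to 4 before ext4_ext_try_to_merge() checks the right neighbour.
 */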
1629
1630 int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
1631                         ext4_lblk_t num, ext_prepare_callback func,
1632                         void *cbdata)
1633 {
1634         struct ext4_ext_path *path = NULL;
1635         struct ext4_ext_cache cbex;
1636         struct ext4_extent *ex;
1637         ext4_lblk_t next, start = 0, end = 0;
1638         ext4_lblk_t last = block + num;
1639         int depth, exists, err = 0;
1640
1641         BUG_ON(func == NULL);
1642         BUG_ON(inode == NULL);
1643
1644         while (block < last && block != EXT_MAX_BLOCK) {
1645                 num = last - block;
1646                 /* find extent for this block */
1647                 path = ext4_ext_find_extent(inode, block, path);
1648                 if (IS_ERR(path)) {
1649                         err = PTR_ERR(path);
1650                         path = NULL;
1651                         break;
1652                 }
1653
1654                 depth = ext_depth(inode);
1655                 BUG_ON(path[depth].p_hdr == NULL);
1656                 ex = path[depth].p_ext;
1657                 next = ext4_ext_next_allocated_block(path);
1658
1659                 exists = 0;
1660                 if (!ex) {
1661                         /* there is no extent yet, so try to allocate
1662                          * all requested space */
1663                         start = block;
1664                         end = block + num;
1665                 } else if (le32_to_cpu(ex->ee_block) > block) {
1666                         /* need to allocate space before found extent */
1667                         start = block;
1668                         end = le32_to_cpu(ex->ee_block);
1669                         if (block + num < end)
1670                                 end = block + num;
1671                 } else if (block >= le32_to_cpu(ex->ee_block)
1672                                         + ext4_ext_get_actual_len(ex)) {
1673                         /* need to allocate space after found extent */
1674                         start = block;
1675                         end = block + num;
1676                         if (end >= next)
1677                                 end = next;
1678                 } else if (block >= le32_to_cpu(ex->ee_block)) {
1679                         /*
1680                          * some part of requested space is covered
1681                          * by found extent
1682                          */
1683                         start = block;
1684                         end = le32_to_cpu(ex->ee_block)
1685                                 + ext4_ext_get_actual_len(ex);
1686                         if (block + num < end)
1687                                 end = block + num;
1688                         exists = 1;
1689                 } else {
1690                         BUG();
1691                 }
1692                 BUG_ON(end <= start);
1693
1694                 if (!exists) {
1695                         cbex.ec_block = start;
1696                         cbex.ec_len = end - start;
1697                         cbex.ec_start = 0;
1698                         cbex.ec_type = EXT4_EXT_CACHE_GAP;
1699                 } else {
1700                         cbex.ec_block = le32_to_cpu(ex->ee_block);
1701                         cbex.ec_len = ext4_ext_get_actual_len(ex);
1702                         cbex.ec_start = ext_pblock(ex);
1703                         cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
1704                 }
1705
1706                 BUG_ON(cbex.ec_len == 0);
1707                 err = func(inode, path, &cbex, ex, cbdata);
1708                 ext4_ext_drop_refs(path);
1709
1710                 if (err < 0)
1711                         break;
1712
1713                 if (err == EXT_REPEAT)
1714                         continue;
1715                 else if (err == EXT_BREAK) {
1716                         err = 0;
1717                         break;
1718                 }
1719
1720                 if (ext_depth(inode) != depth) {
1721                         /* depth was changed. we have to realloc path */
1722                         kfree(path);
1723                         path = NULL;
1724                 }
1725
1726                 block = cbex.ec_block + cbex.ec_len;
1727         }
1728
1729         if (path) {
1730                 ext4_ext_drop_refs(path);
1731                 kfree(path);
1732         }
1733
1734         return err;
1735 }
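/*
 * A minimal sketch of a walker callback, assuming the ext_prepare_callback
 * typedef and the EXT_CONTINUE/EXT_BREAK/EXT_REPEAT return codes from
 * ext4_extents.h; the callback below is hypothetical and only counts
 * mapped blocks:
 *
 *	static int count_mapped_cb(struct inode *inode,
 *				   struct ext4_ext_path *path,
 *				   struct ext4_ext_cache *cbex,
 *				   struct ext4_extent *ex, void *data)
 *	{
 *		if (cbex->ec_type == EXT4_EXT_CACHE_EXTENT)
 *			*(ext4_lblk_t *)data += cbex->ec_len;
 *		return EXT_CONTINUE;
 *	}
 *
 * ext4_ext_walk_space(inode, 0, EXT_MAX_BLOCK, count_mapped_cb, &count)
 * would then visit every extent and gap starting from logical block 0.
 */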
1736
1737 static void
1738 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
1739                         __u32 len, ext4_fsblk_t start, int type)
1740 {
1741         struct ext4_ext_cache *cex;
1742         BUG_ON(len == 0);
1743         cex = &EXT4_I(inode)->i_cached_extent;
1744         cex->ec_type = type;
1745         cex->ec_block = block;
1746         cex->ec_len = len;
1747         cex->ec_start = start;
1748 }
1749
1750 /*
1751  * ext4_ext_put_gap_in_cache:
1752  * calculate boundaries of the gap that the requested block fits into
1753  * and cache this gap
1754  */
1755 static void
1756 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
1757                                 ext4_lblk_t block)
1758 {
1759         int depth = ext_depth(inode);
1760         unsigned long len;
1761         ext4_lblk_t lblock;
1762         struct ext4_extent *ex;
1763
1764         ex = path[depth].p_ext;
1765         if (ex == NULL) {
1766                 /* there is no extent yet, so gap is [0;-] */
1767                 lblock = 0;
1768                 len = EXT_MAX_BLOCK;
1769                 ext_debug("cache gap(whole file):");
1770         } else if (block < le32_to_cpu(ex->ee_block)) {
1771                 lblock = block;
1772                 len = le32_to_cpu(ex->ee_block) - block;
1773                 ext_debug("cache gap(before): %u [%u:%u]",
1774                                 block,
1775                                 le32_to_cpu(ex->ee_block),
1776                                  ext4_ext_get_actual_len(ex));
1777         } else if (block >= le32_to_cpu(ex->ee_block)
1778                         + ext4_ext_get_actual_len(ex)) {
1779                 ext4_lblk_t next;
1780                 lblock = le32_to_cpu(ex->ee_block)
1781                         + ext4_ext_get_actual_len(ex);
1782
1783                 next = ext4_ext_next_allocated_block(path);
1784                 ext_debug("cache gap(after): [%u:%u] %u",
1785                                 le32_to_cpu(ex->ee_block),
1786                                 ext4_ext_get_actual_len(ex),
1787                                 block);
1788                 BUG_ON(next == lblock);
1789                 len = next - lblock;
1790         } else {
1791                 lblock = len = 0;
1792                 BUG();
1793         }
1794
1795         ext_debug(" -> %u:%lu\n", lblock, len);
1796         ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
1797 }
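/*
 * A worked example of the gap calculation above, for a hypothetical
 * layout: with extents covering logical blocks 0..9 and 50..59, a lookup
 * of block 20 hits the "after found extent" branch, so lblock = 0 + 10 = 10,
 * the next allocated block is 50, len = 50 - 10 = 40, and the gap
 * 10..49 is cached with EXT4_EXT_CACHE_GAP.
 */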
1798
1799 static int
1800 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
1801                         struct ext4_extent *ex)
1802 {
1803         struct ext4_ext_cache *cex;
1804
1805         cex = &EXT4_I(inode)->i_cached_extent;
1806
1807         /* has cache valid data? */
1808         if (cex->ec_type == EXT4_EXT_CACHE_NO)
1809                 return EXT4_EXT_CACHE_NO;
1810
1811         BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
1812                         cex->ec_type != EXT4_EXT_CACHE_EXTENT);
1813         if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
1814                 ex->ee_block = cpu_to_le32(cex->ec_block);
1815                 ext4_ext_store_pblock(ex, cex->ec_start);
1816                 ex->ee_len = cpu_to_le16(cex->ec_len);
1817                 ext_debug("%u cached by %u:%u:%llu\n",
1818                                 block,
1819                                 cex->ec_block, cex->ec_len, cex->ec_start);
1820                 return cex->ec_type;
1821         }
1822
1823         /* not in cache */
1824         return EXT4_EXT_CACHE_NO;
1825 }
1826
1827 /*
1828  * ext4_ext_rm_idx:
1829  * removes index from the index block.
1830  * It's used in truncate case only, thus all requests are for
1831  * last index in the block only.
1832  */
1833 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
1834                         struct ext4_ext_path *path)
1835 {
1836         struct buffer_head *bh;
1837         int err;
1838         ext4_fsblk_t leaf;
1839
1840         /* free index block */
1841         path--;
1842         leaf = idx_pblock(path->p_idx);
1843         BUG_ON(path->p_hdr->eh_entries == 0);
1844         err = ext4_ext_get_access(handle, inode, path);
1845         if (err)
1846                 return err;
1847         le16_add_cpu(&path->p_hdr->eh_entries, -1);
1848         err = ext4_ext_dirty(handle, inode, path);
1849         if (err)
1850                 return err;
1851         ext_debug("index is empty, remove it, free block %llu\n", leaf);
1852         bh = sb_find_get_block(inode->i_sb, leaf);
1853         ext4_forget(handle, 1, inode, bh, leaf);
1854         ext4_free_blocks(handle, inode, leaf, 1, 1);
1855         return err;
1856 }
1857
1858 /*
1859  * ext4_ext_calc_credits_for_single_extent:
1860  * This routine returns the maximum number of credits needed to insert
1861  * an extent into the extent tree.
1862  * When the actual path is passed, the caller should calculate the credits
1863  * under i_data_sem.
1864  */
1865 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
1866                                                 struct ext4_ext_path *path)
1867 {
1868         if (path) {
1869                 int depth = ext_depth(inode);
1870                 int ret = 0;
1871
1872                 /* probably there is space in leaf? */
1873                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
1874                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
1875
1876                         /*
1877                          *  There is some space in the leaf, so there is
1878                          *  no need to account for the leaf block credit.
1879                          *
1880                          *  Bitmaps, block group descriptor blocks
1881                          *  and other metadata blocks still need to be
1882                          *  accounted for.
1883                          */
1884                         /* 1 bitmap, 1 block group descriptor */
1885                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
1886                 }
1887         }
1888
1889         return ext4_chunk_trans_blocks(inode, nrblocks);
1890 }
1891
1892 /*
1893  * How many index/leaf blocks need to be changed/allocated to modify nrblocks?
1894  *
1895  * If nrblocks fit in a single extent (chunk flag is 1), then
1896  * in the worst case each tree level's index/leaf needs to be changed;
1897  * if the tree splits due to inserting a new extent, then the old
1898  * index/leaf need to be updated too.
1899  *
1900  * If the nrblocks are discontiguous, they could cause
1901  * the whole tree to split more than once, but this is really rare.
1902  */
1903 int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
1904 {
1905         int index;
1906         int depth = ext_depth(inode);
1907
1908         if (chunk)
1909                 index = depth * 2;
1910         else
1911                 index = depth * 3;
1912
1913         return index;
1914 }
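/*
 * A worked example of the estimate above, for a hypothetical tree of
 * depth 2: a single contiguous chunk (chunk != 0) is charged
 * index = 2 * 2 = 4 metadata blocks, while discontiguous blocks
 * (chunk == 0) are charged index = 2 * 3 = 6 to allow for the extra
 * splits they may trigger.
 */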
1915
1916 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
1917                                 struct ext4_extent *ex,
1918                                 ext4_lblk_t from, ext4_lblk_t to)
1919 {
1920         struct buffer_head *bh;
1921         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
1922         int i, metadata = 0;
1923
1924         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
1925                 metadata = 1;
1926 #ifdef EXTENTS_STATS
1927         {
1928                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1929                 spin_lock(&sbi->s_ext_stats_lock);
1930                 sbi->s_ext_blocks += ee_len;
1931                 sbi->s_ext_extents++;
1932                 if (ee_len < sbi->s_ext_min)
1933                         sbi->s_ext_min = ee_len;
1934                 if (ee_len > sbi->s_ext_max)
1935                         sbi->s_ext_max = ee_len;
1936                 if (ext_depth(inode) > sbi->s_depth_max)
1937                         sbi->s_depth_max = ext_depth(inode);
1938                 spin_unlock(&sbi->s_ext_stats_lock);
1939         }
1940 #endif
1941         if (from >= le32_to_cpu(ex->ee_block)
1942             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
1943                 /* tail removal */
1944                 ext4_lblk_t num;
1945                 ext4_fsblk_t start;
1946
1947                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
1948                 start = ext_pblock(ex) + ee_len - num;
1949                 ext_debug("free last %u blocks starting %llu\n", num, start);
1950                 for (i = 0; i < num; i++) {
1951                         bh = sb_find_get_block(inode->i_sb, start + i);
1952                         ext4_forget(handle, 0, inode, bh, start + i);
1953                 }
1954                 ext4_free_blocks(handle, inode, start, num, metadata);
1955         } else if (from == le32_to_cpu(ex->ee_block)
1956                    && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
1957                 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
1958                         from, to, le32_to_cpu(ex->ee_block), ee_len);
1959         } else {
1960                 printk(KERN_INFO "strange request: removal(2) "
1961                                 "%u-%u from %u:%u\n",
1962                                 from, to, le32_to_cpu(ex->ee_block), ee_len);
1963         }
1964         return 0;
1965 }
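/*
 * A worked example of the tail-removal arithmetic above, for a
 * hypothetical extent with ee_block = 100, ee_len = 10 and physical
 * start 5000: removing 105..109 gives num = 100 + 10 - 105 = 5 and
 * start = 5000 + 10 - 5 = 5005, so physical blocks 5005..5009 are
 * forgotten and freed while 5000..5004 remain allocated.
 */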
1966
1967 static int
1968 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
1969                 struct ext4_ext_path *path, ext4_lblk_t start)
1970 {
1971         int err = 0, correct_index = 0;
1972         int depth = ext_depth(inode), credits;
1973         struct ext4_extent_header *eh;
1974         ext4_lblk_t a, b, block;
1975         unsigned num;
1976         ext4_lblk_t ex_ee_block;
1977         unsigned short ex_ee_len;
1978         unsigned uninitialized = 0;
1979         struct ext4_extent *ex;
1980
1981         /* the header must be checked already in ext4_ext_remove_space() */
1982         ext_debug("truncate since %u in leaf\n", start);
1983         if (!path[depth].p_hdr)
1984                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
1985         eh = path[depth].p_hdr;
1986         BUG_ON(eh == NULL);
1987
1988         /* find where to start removing */
1989         ex = EXT_LAST_EXTENT(eh);
1990
1991         ex_ee_block = le32_to_cpu(ex->ee_block);
1992         if (ext4_ext_is_uninitialized(ex))
1993                 uninitialized = 1;
1994         ex_ee_len = ext4_ext_get_actual_len(ex);
1995
1996         while (ex >= EXT_FIRST_EXTENT(eh) &&
1997                         ex_ee_block + ex_ee_len > start) {
1998                 ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
1999                 path[depth].p_ext = ex;
2000
2001                 a = ex_ee_block > start ? ex_ee_block : start;
2002                 b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
2003                         ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
2004
2005                 ext_debug("  border %u:%u\n", a, b);
2006
2007                 if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
2008                         block = 0;
2009                         num = 0;
2010                         BUG();
2011                 } else if (a != ex_ee_block) {
2012                         /* remove tail of the extent */
2013                         block = ex_ee_block;
2014                         num = a - block;
2015                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2016                         /* remove head of the extent */
2017                         block = a;
2018                         num = b - a;
2019                         /* there is no "make a hole" API yet */
2020                         BUG();
2021                 } else {
2022                         /* remove whole extent: excellent! */
2023                         block = ex_ee_block;
2024                         num = 0;
2025                         BUG_ON(a != ex_ee_block);
2026                         BUG_ON(b != ex_ee_block + ex_ee_len - 1);
2027                 }
2028
2029                 /*
2030                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2031                  * descriptor) for each block group; assume two block
2032                  * groups plus ex_ee_len/blocks_per_block_group for
2033                  * the worst case
2034                  */
2035                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2036                 if (ex == EXT_FIRST_EXTENT(eh)) {
2037                         correct_index = 1;
2038                         credits += (ext_depth(inode)) + 1;
2039                 }
2040                 credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
2041
2042                 err = ext4_ext_journal_restart(handle, credits);
2043                 if (err)
2044                         goto out;
2045
2046                 err = ext4_ext_get_access(handle, inode, path + depth);
2047                 if (err)
2048                         goto out;
2049
2050                 err = ext4_remove_blocks(handle, inode, ex, a, b);
2051                 if (err)
2052                         goto out;
2053
2054                 if (num == 0) {
2055                         /* this extent is removed; mark slot entirely unused */
2056                         ext4_ext_store_pblock(ex, 0);
2057                         le16_add_cpu(&eh->eh_entries, -1);
2058                 }
2059
2060                 ex->ee_block = cpu_to_le32(block);
2061                 ex->ee_len = cpu_to_le16(num);
2062                 /*
2063                  * Do not mark uninitialized if all the blocks in the
2064                  * extent have been removed.
2065                  */
2066                 if (uninitialized && num)
2067                         ext4_ext_mark_uninitialized(ex);
2068
2069                 err = ext4_ext_dirty(handle, inode, path + depth);
2070                 if (err)
2071                         goto out;
2072
2073                 ext_debug("new extent: %u:%u:%llu\n", block, num,
2074                                 ext_pblock(ex));
2075                 ex--;
2076                 ex_ee_block = le32_to_cpu(ex->ee_block);
2077                 ex_ee_len = ext4_ext_get_actual_len(ex);
2078         }
2079
2080         if (correct_index && eh->eh_entries)
2081                 err = ext4_ext_correct_indexes(handle, inode, path);
2082
2083         /* if this leaf is now empty, then we should
2084          * remove it from the index block above */
2085         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2086                 err = ext4_ext_rm_idx(handle, inode, path + depth);
2087
2088 out:
2089         return err;
2090 }
2091
2092 /*
2093  * ext4_ext_more_to_rm:
2094  * returns 1 if current index has to be freed (even partial)
2095  */
2096 static int
2097 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2098 {
2099         BUG_ON(path->p_idx == NULL);
2100
2101         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2102                 return 0;
2103
2104         /*
2105          * if truncation at a deeper level happened, it wasn't partial,
2106          * so we have to consider the current index for truncation
2107          */
2108         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2109                 return 0;
2110         return 1;
2111 }
2112
2113 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2114 {
2115         struct super_block *sb = inode->i_sb;
2116         int depth = ext_depth(inode);
2117         struct ext4_ext_path *path;
2118         handle_t *handle;
2119         int i = 0, err = 0;
2120
2121         ext_debug("truncate since %u\n", start);
2122
2123         /* probably first extent we're gonna free will be last in block */
2124         handle = ext4_journal_start(inode, depth + 1);
2125         if (IS_ERR(handle))
2126                 return PTR_ERR(handle);
2127
2128         ext4_ext_invalidate_cache(inode);
2129
2130         /*
2131          * We start scanning from the right side, freeing all the blocks
2132          * after i_size and walking into the tree depth-wise.
2133          */
2134         path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2135         if (path == NULL) {
2136                 ext4_journal_stop(handle);
2137                 return -ENOMEM;
2138         }
2139         path[0].p_hdr = ext_inode_hdr(inode);
2140         if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
2141                 err = -EIO;
2142                 goto out;
2143         }
2144         path[0].p_depth = depth;
2145
2146         while (i >= 0 && err == 0) {
2147                 if (i == depth) {
2148                         /* this is leaf block */
2149                         err = ext4_ext_rm_leaf(handle, inode, path, start);
2150                         /* root level has p_bh == NULL, brelse() eats this */
2151                         brelse(path[i].p_bh);
2152                         path[i].p_bh = NULL;
2153                         i--;
2154                         continue;
2155                 }
2156
2157                 /* this is index block */
2158                 if (!path[i].p_hdr) {
2159                         ext_debug("initialize header\n");
2160                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2161                 }
2162
2163                 if (!path[i].p_idx) {
2164                         /* this level hasn't been touched yet */
2165                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2166                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2167                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2168                                   path[i].p_hdr,
2169                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2170                 } else {
2171                         /* we were already here, see at next index */
2172                         path[i].p_idx--;
2173                 }
2174
2175                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2176                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2177                                 path[i].p_idx);
2178                 if (ext4_ext_more_to_rm(path + i)) {
2179                         struct buffer_head *bh;
2180                         /* go to the next level */
2181                         ext_debug("move to level %d (block %llu)\n",
2182                                   i + 1, idx_pblock(path[i].p_idx));
2183                         memset(path + i + 1, 0, sizeof(*path));
2184                         bh = sb_bread(sb, idx_pblock(path[i].p_idx));
2185                         if (!bh) {
2186                                 /* should we reset i_size? */
2187                                 err = -EIO;
2188                                 break;
2189                         }
2190                         if (WARN_ON(i + 1 > depth)) {
2191                                 err = -EIO;
2192                                 break;
2193                         }
2194                         if (ext4_ext_check_header(inode, ext_block_hdr(bh),
2195                                                         depth - i - 1)) {
2196                                 err = -EIO;
2197                                 break;
2198                         }
2199                         path[i + 1].p_bh = bh;
2200
2201                         /* save actual number of indexes since this
2202                          * number is changed at the next iteration */
2203                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2204                         i++;
2205                 } else {
2206                         /* we finished processing this index, go up */
2207                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2208                                 /* index is empty, remove it;
2209                                  * handle must already be prepared by
2210                                  * ext4_ext_rm_leaf() */
2211                                 err = ext4_ext_rm_idx(handle, inode, path + i);
2212                         }
2213                         /* root level has p_bh == NULL, brelse() eats this */
2214                         brelse(path[i].p_bh);
2215                         path[i].p_bh = NULL;
2216                         i--;
2217                         ext_debug("return to level %d\n", i);
2218                 }
2219         }
2220
2221         /* TODO: flexible tree reduction should be here */
2222         if (path->p_hdr->eh_entries == 0) {
2223                 /*
2224                  * truncating to zero freed the whole tree,
2225                  * so we need to correct eh_depth
2226                  */
2227                 err = ext4_ext_get_access(handle, inode, path);
2228                 if (err == 0) {
2229                         ext_inode_hdr(inode)->eh_depth = 0;
2230                         ext_inode_hdr(inode)->eh_max =
2231                                 cpu_to_le16(ext4_ext_space_root(inode));
2232                         err = ext4_ext_dirty(handle, inode, path);
2233                 }
2234         }
2235 out:
2236         ext4_ext_drop_refs(path);
2237         kfree(path);
2238         ext4_journal_stop(handle);
2239
2240         return err;
2241 }
2242
2243 /*
2244  * called at mount time
2245  */
2246 void ext4_ext_init(struct super_block *sb)
2247 {
2248         /*
2249          * possible initialization would be here
2250          */
2251
2252         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2253                 printk(KERN_INFO "EXT4-fs: file extents enabled");
2254 #ifdef AGGRESSIVE_TEST
2255                 printk(", aggressive tests");
2256 #endif
2257 #ifdef CHECK_BINSEARCH
2258                 printk(", check binsearch");
2259 #endif
2260 #ifdef EXTENTS_STATS
2261                 printk(", stats");
2262 #endif
2263                 printk("\n");
2264 #ifdef EXTENTS_STATS
2265                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2266                 EXT4_SB(sb)->s_ext_min = 1 << 30;
2267                 EXT4_SB(sb)->s_ext_max = 0;
2268 #endif
2269         }
2270 }
2271
2272 /*
2273  * called at umount time
2274  */
2275 void ext4_ext_release(struct super_block *sb)
2276 {
2277         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2278                 return;
2279
2280 #ifdef EXTENTS_STATS
2281         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2282                 struct ext4_sb_info *sbi = EXT4_SB(sb);
2283                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2284                         sbi->s_ext_blocks, sbi->s_ext_extents,
2285                         sbi->s_ext_blocks / sbi->s_ext_extents);
2286                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2287                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2288         }
2289 #endif
2290 }
2291
2292 static void bi_complete(struct bio *bio, int error)
2293 {
2294         complete((struct completion *)bio->bi_private);
2295 }
2296
2297 /* FIXME!! we need to try to merge to left or right after zero-out  */
2298 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2299 {
2300         int ret = -EIO;
2301         struct bio *bio;
2302         int blkbits, blocksize;
2303         sector_t ee_pblock;
2304         struct completion event;
2305         unsigned int ee_len, len, done, offset;
2306
2307
2308         blkbits   = inode->i_blkbits;
2309         blocksize = inode->i_sb->s_blocksize;
2310         ee_len    = ext4_ext_get_actual_len(ex);
2311         ee_pblock = ext_pblock(ex);
2312
2313         /* convert ee_pblock to 512 byte sectors */
2314         ee_pblock = ee_pblock << (blkbits - 9);
2315
2316         while (ee_len > 0) {
2317
2318                 if (ee_len > BIO_MAX_PAGES)
2319                         len = BIO_MAX_PAGES;
2320                 else
2321                         len = ee_len;
2322
2323                 bio = bio_alloc(GFP_NOIO, len);
2324                 if (!bio)
2325                         return -ENOMEM;
2326                 bio->bi_sector = ee_pblock;
2327                 bio->bi_bdev   = inode->i_sb->s_bdev;
2328
2329                 done = 0;
2330                 offset = 0;
2331                 while (done < len) {
2332                         ret = bio_add_page(bio, ZERO_PAGE(0),
2333                                                         blocksize, offset);
2334                         if (ret != blocksize) {
2335                                 /*
2336                                  * We can't add any more pages because of
2337                                  * hardware limitations.  Start a new bio.
2338                                  */
2339                                 break;
2340                         }
2341                         done++;
2342                         offset += blocksize;
2343                         if (offset >= PAGE_CACHE_SIZE)
2344                                 offset = 0;
2345                 }
2346
2347                 init_completion(&event);
2348                 bio->bi_private = &event;
2349                 bio->bi_end_io = bi_complete;
2350                 submit_bio(WRITE, bio);
2351                 wait_for_completion(&event);
2352
2353                 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
2354                         ret = 0;
2355                 else {
2356                         ret = -EIO;
2357                         break;
2358                 }
2359                 bio_put(bio);
2360                 ee_len    -= done;
2361                 ee_pblock += done  << (blkbits - 9);
2362         }
2363         return ret;
2364 }
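/*
 * A small worked example of the sector conversion above, assuming a
 * hypothetical 4k filesystem (blkbits = 12): block numbers are shifted
 * left by blkbits - 9 = 3, i.e. multiplied by 8, since each 4096-byte
 * block spans eight 512-byte sectors, so an extent starting at physical
 * block 1000 is zeroed starting at sector 8000.
 */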
2365
2366 #define EXT4_EXT_ZERO_LEN 7
2367
2368 /*
2369  * This function is called by ext4_ext_get_blocks() if someone tries to write
2370  * to an uninitialized extent. It may result in splitting the uninitialized
2371  * extent into multiple extents (upto three - one initialized and two
2372  * uninitialized).
2373  * There are three possibilities:
2374  *   a> There is no split required: Entire extent should be initialized
2375  *   b> Splits into two extents: Write is happening at either end of the extent
2376  *   c> Splits into three extents: Someone is writing in the middle of the extent
2377  */
2378 static int ext4_ext_convert_to_initialized(handle_t *handle,
2379                                                 struct inode *inode,
2380                                                 struct ext4_ext_path *path,
2381                                                 ext4_lblk_t iblock,
2382                                                 unsigned int max_blocks)
2383 {
2384         struct ext4_extent *ex, newex, orig_ex;
2385         struct ext4_extent *ex1 = NULL;
2386         struct ext4_extent *ex2 = NULL;
2387         struct ext4_extent *ex3 = NULL;
2388         struct ext4_extent_header *eh;
2389         ext4_lblk_t ee_block;
2390         unsigned int allocated, ee_len, depth;
2391         ext4_fsblk_t newblock;
2392         int err = 0;
2393         int ret = 0;
2394
2395         depth = ext_depth(inode);
2396         eh = path[depth].p_hdr;
2397         ex = path[depth].p_ext;
2398         ee_block = le32_to_cpu(ex->ee_block);
2399         ee_len = ext4_ext_get_actual_len(ex);
2400         allocated = ee_len - (iblock - ee_block);
2401         newblock = iblock - ee_block + ext_pblock(ex);
2402         ex2 = ex;
2403         orig_ex.ee_block = ex->ee_block;
2404         orig_ex.ee_len   = cpu_to_le16(ee_len);
2405         ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2406
2407         err = ext4_ext_get_access(handle, inode, path + depth);
2408         if (err)
2409                 goto out;
2410         /* If the extent spans at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2411         if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
2412                 err =  ext4_ext_zeroout(inode, &orig_ex);
2413                 if (err)
2414                         goto fix_extent_len;
2415                 /* update the extent length and mark as initialized */
2416                 ex->ee_block = orig_ex.ee_block;
2417                 ex->ee_len   = orig_ex.ee_len;
2418                 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2419                 ext4_ext_dirty(handle, inode, path + depth);
2420                 /* zeroed the full extent */
2421                 return allocated;
2422         }
2423
2424         /* ex1: ee_block to iblock - 1 : uninitialized */
2425         if (iblock > ee_block) {
2426                 ex1 = ex;
2427                 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2428                 ext4_ext_mark_uninitialized(ex1);
2429                 ex2 = &newex;
2430         }
2431         /*
2432          * for sanity, update the length of the ex2 extent before
2433          * we insert ex3, if ex1 is NULL. This is to avoid temporary
2434          * overlap of blocks.
2435          */
2436         if (!ex1 && allocated > max_blocks)
2437                 ex2->ee_len = cpu_to_le16(max_blocks);
2438         /* ex3: to ee_block + ee_len : uninitialized */
2439         if (allocated > max_blocks) {
2440                 unsigned int newdepth;
2441                 /* If at most EXT4_EXT_ZERO_LEN blocks remain, zero them out directly */
2442                 if (allocated <= EXT4_EXT_ZERO_LEN) {
2443                         /*
2444                          * iblock == ee_block is handled by the zeroout
2445                          * at the beginning.
2446                          * Mark first half uninitialized.
2447                          * Mark second half initialized and zero out the
2448                          * initialized extent
2449                          */
2450                         ex->ee_block = orig_ex.ee_block;
2451                         ex->ee_len   = cpu_to_le16(ee_len - allocated);
2452                         ext4_ext_mark_uninitialized(ex);
2453                         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2454                         ext4_ext_dirty(handle, inode, path + depth);
2455
2456                         ex3 = &newex;
2457                         ex3->ee_block = cpu_to_le32(iblock);
2458                         ext4_ext_store_pblock(ex3, newblock);
2459                         ex3->ee_len = cpu_to_le16(allocated);
2460                         err = ext4_ext_insert_extent(handle, inode, path, ex3);
2461                         if (err == -ENOSPC) {
2462                                 err =  ext4_ext_zeroout(inode, &orig_ex);
2463                                 if (err)
2464                                         goto fix_extent_len;
2465                                 ex->ee_block = orig_ex.ee_block;
2466                                 ex->ee_len   = orig_ex.ee_len;
2467                                 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2468                                 ext4_ext_dirty(handle, inode, path + depth);
2469                                 /* blocks available from iblock */
2470                                 return allocated;
2471
2472                         } else if (err)
2473                                 goto fix_extent_len;
2474
2475                         /*
2476                          * We need to zero out the second half because
2477                          * a fallocate request can update the file size and
2478                          * converting the second half to an initialized extent
2479                          * implies that we could leak some junk data to user
2480                          * space.
2481                          */
2482                         err =  ext4_ext_zeroout(inode, ex3);
2483                         if (err) {
2484                                 /*
2485                                  * We should actually mark the
2486                                  * second half as uninit and return an error;
2487                                  * the insert may have changed the extent.
2488                                  */
2489                                 depth = ext_depth(inode);
2490                                 ext4_ext_drop_refs(path);
2491                                 path = ext4_ext_find_extent(inode,
2492                                                                 iblock, path);
2493                                 if (IS_ERR(path)) {
2494                                         err = PTR_ERR(path);
2495                                         return err;
2496                                 }
2497                                 /* get the second half extent details */
2498                                 ex = path[depth].p_ext;
2499                                 err = ext4_ext_get_access(handle, inode,
2500                                                                 path + depth);
2501                                 if (err)
2502                                         return err;
2503                                 ext4_ext_mark_uninitialized(ex);
2504                                 ext4_ext_dirty(handle, inode, path + depth);
2505                                 return err;
2506                         }
2507
2508                         /* zeroed the second half */
2509                         return allocated;
2510                 }
2511                 ex3 = &newex;
2512                 ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2513                 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2514                 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2515                 ext4_ext_mark_uninitialized(ex3);
2516                 err = ext4_ext_insert_extent(handle, inode, path, ex3);
2517                 if (err == -ENOSPC) {
2518                         err =  ext4_ext_zeroout(inode, &orig_ex);
2519                         if (err)
2520                                 goto fix_extent_len;
2521                         /* update the extent length and mark as initialized */
2522                         ex->ee_block = orig_ex.ee_block;
2523                         ex->ee_len   = orig_ex.ee_len;
2524                         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2525                         ext4_ext_dirty(handle, inode, path + depth);
2526                         /* zeroed the full extent */
2527                         /* blocks available from iblock */
2528                         return allocated;
2529
2530                 } else if (err)
2531                         goto fix_extent_len;
2532                 /*
2533                  * The depth, and hence eh & ex might change
2534                  * as part of the insert above.
2535                  */
2536                 newdepth = ext_depth(inode);
2537                 /*
2538                  * update the extent length after successful insert of the
2539                  * split extent
2540                  */
2541                 orig_ex.ee_len = cpu_to_le16(ee_len -
2542                                                 ext4_ext_get_actual_len(ex3));
2543                 depth = newdepth;
2544                 ext4_ext_drop_refs(path);
2545                 path = ext4_ext_find_extent(inode, iblock, path);
2546                 if (IS_ERR(path)) {
2547                         err = PTR_ERR(path);
2548                         goto out;
2549                 }
2550                 eh = path[depth].p_hdr;
2551                 ex = path[depth].p_ext;
2552                 if (ex2 != &newex)
2553                         ex2 = ex;
2554
2555                 err = ext4_ext_get_access(handle, inode, path + depth);
2556                 if (err)
2557                         goto out;
2558
2559                 allocated = max_blocks;
2560
2561                 /* If the extent is at most EXT4_EXT_ZERO_LEN blocks and we are trying
2562                  * to insert an extent in the middle, zero it out directly;
2563                  * otherwise give the extent a chance to merge to the left.
2564                  */
2565                 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2566                                                         iblock != ee_block) {
2567                         err =  ext4_ext_zeroout(inode, &orig_ex);
2568                         if (err)
2569                                 goto fix_extent_len;
2570                         /* update the extent length and mark as initialized */
2571                         ex->ee_block = orig_ex.ee_block;
2572                         ex->ee_len   = orig_ex.ee_len;
2573                         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2574                         ext4_ext_dirty(handle, inode, path + depth);
2575                         /* zero out the first half */
2576                         /* blocks available from iblock */
2577                         return allocated;
2578                 }
2579         }
2580         /*
2581          * If there was a change of depth as part of the
2582          * insertion of ex3 above, we need to update the length
2583          * of the ex1 extent again here
2584          */
2585         if (ex1 && ex1 != ex) {
2586                 ex1 = ex;
2587                 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2588                 ext4_ext_mark_uninitialized(ex1);
2589                 ex2 = &newex;
2590         }
2591         /* ex2: iblock to iblock + maxblocks-1 : initialised */
2592         ex2->ee_block = cpu_to_le32(iblock);
2593         ext4_ext_store_pblock(ex2, newblock);
2594         ex2->ee_len = cpu_to_le16(allocated);
2595         if (ex2 != ex)
2596                 goto insert;
2597         /*
2598          * The new (initialized) extent starts from the first block
2599          * in the current extent, i.e., ex2 == ex.
2600          * We have to see if it can be merged with the extent
2601          * on the left.
2602          */
2603         if (ex2 > EXT_FIRST_EXTENT(eh)) {
2604                 /*
2605                  * To merge left, pass "ex2 - 1" to try_to_merge(),
2606                  * since it merges towards right _only_.
2607                  */
2608                 ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2609                 if (ret) {
2610                         err = ext4_ext_correct_indexes(handle, inode, path);
2611                         if (err)
2612                                 goto out;
2613                         depth = ext_depth(inode);
2614                         ex2--;
2615                 }
2616         }
2617         /*
2618          * Try to merge towards the right. This might be required
2619          * only when the whole extent is being written to.
2620          * i.e. ex2 == ex and ex3 == NULL.
2621          */
2622         if (!ex3) {
2623                 ret = ext4_ext_try_to_merge(inode, path, ex2);
2624                 if (ret) {
2625                         err = ext4_ext_correct_indexes(handle, inode, path);
2626                         if (err)
2627                                 goto out;
2628                 }
2629         }
2630         /* Mark modified extent as dirty */
2631         err = ext4_ext_dirty(handle, inode, path + depth);
2632         goto out;
2633 insert:
2634         err = ext4_ext_insert_extent(handle, inode, path, &newex);
2635         if (err == -ENOSPC) {
2636                 err =  ext4_ext_zeroout(inode, &orig_ex);
2637                 if (err)
2638                         goto fix_extent_len;
2639                 /* update the extent length and mark as initialized */
2640                 ex->ee_block = orig_ex.ee_block;
2641                 ex->ee_len   = orig_ex.ee_len;
2642                 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2643                 ext4_ext_dirty(handle, inode, path + depth);
2644                 /* zero out the first half */
2645                 return allocated;
2646         } else if (err)
2647                 goto fix_extent_len;
2648 out:
2649         return err ? err : allocated;
2650
2651 fix_extent_len:
2652         ex->ee_block = orig_ex.ee_block;
2653         ex->ee_len   = orig_ex.ee_len;
2654         ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2655         ext4_ext_mark_uninitialized(ex);
2656         ext4_ext_dirty(handle, inode, path + depth);
2657         return err;
2658 }
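/*
 * A worked example of the three-extent split performed above, for a
 * hypothetical uninitialized extent covering logical blocks 100..199:
 * writing blocks 120..129 leaves ex1 = 100..119 uninitialized (reusing
 * the original extent entry), ex2 = 120..129 initialized and
 * ex3 = 130..199 uninitialized, with ex3 and then ex2 added through
 * ext4_ext_insert_extent().
 */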
2659
2660 /*
2661  * Block allocation/map/preallocation routine for extents based files
2662  *
2663  *
2664  * Needs to be called with
2665  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system blocks
2666  * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
2667  *
2668  * return > 0, number of blocks already mapped/allocated
2669  *          if create == 0 and these are pre-allocated blocks
2670  *              buffer head is unmapped
2671  *          otherwise blocks are mapped
2672  *
2673  * return = 0, if plain look up failed (blocks have not been allocated)
2674  *          buffer head is unmapped
2675  *
2676  * return < 0, error case.
2677  */
2678 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2679                         ext4_lblk_t iblock,
2680                         unsigned int max_blocks, struct buffer_head *bh_result,
2681                         int create, int extend_disksize)
2682 {
2683         struct ext4_ext_path *path = NULL;
2684         struct ext4_extent_header *eh;
2685         struct ext4_extent newex, *ex;
2686         ext4_fsblk_t newblock;
2687         int err = 0, depth, ret, cache_type;
2688         unsigned int allocated = 0;
2689         struct ext4_allocation_request ar;
2690         loff_t disksize;
2691
2692         __clear_bit(BH_New, &bh_result->b_state);
2693         ext_debug("blocks %u/%u requested for inode %u\n",
2694                         iblock, max_blocks, inode->i_ino);
2695
2696         /* check in cache */
2697         cache_type = ext4_ext_in_cache(inode, iblock, &newex);
2698         if (cache_type) {
2699                 if (cache_type == EXT4_EXT_CACHE_GAP) {
2700                         if (!create) {
2701                                 /*
2702                                  * block isn't allocated yet and
2703                                  * user doesn't want to allocate it
2704                                  */
2705                                 goto out2;
2706                         }
2707                         /* we should allocate requested block */
2708                 } else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
2709                         /* block is already allocated */
2710                         newblock = iblock
2711                                    - le32_to_cpu(newex.ee_block)
2712                                    + ext_pblock(&newex);
2713                         /* number of remaining blocks in the extent */
2714                         allocated = ext4_ext_get_actual_len(&newex) -
2715                                         (iblock - le32_to_cpu(newex.ee_block));
2716                         goto out;
2717                 } else {
2718                         BUG();
2719                 }
2720         }
2721
2722         /* find extent for this block */
2723         path = ext4_ext_find_extent(inode, iblock, NULL);
2724         if (IS_ERR(path)) {
2725                 err = PTR_ERR(path);
2726                 path = NULL;
2727                 goto out2;
2728         }
2729
2730         depth = ext_depth(inode);
2731
2732         /*
2733          * consistent leaf must not be empty;
2734          * this situation is possible, though, _during_ tree modification;
2735          * this is why assert can't be put in ext4_ext_find_extent()
2736          */
2737         BUG_ON(path[depth].p_ext == NULL && depth != 0);
2738         eh = path[depth].p_hdr;
2739
2740         ex = path[depth].p_ext;
2741         if (ex) {
2742                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
2743                 ext4_fsblk_t ee_start = ext_pblock(ex);
2744                 unsigned short ee_len;
2745
2746                 /*
2747                  * Uninitialized extents are treated as holes, except that
2748                  * we split out initialized portions during a write.
2749                  */
2750                 ee_len = ext4_ext_get_actual_len(ex);
2751                 /* if found extent covers block, simply return it */
2752                 if (iblock >= ee_block && iblock < ee_block + ee_len) {
2753                         newblock = iblock - ee_block + ee_start;
2754                         /* number of remaining blocks in the extent */
2755                         allocated = ee_len - (iblock - ee_block);
2756                         ext_debug("%u fit into %lu:%d -> %llu\n", iblock,
2757                                         ee_block, ee_len, newblock);
2758
2759                         /* Do not put uninitialized extent in the cache */
2760                         if (!ext4_ext_is_uninitialized(ex)) {
2761                                 ext4_ext_put_in_cache(inode, ee_block,
2762                                                         ee_len, ee_start,
2763                                                         EXT4_EXT_CACHE_EXTENT);
2764                                 goto out;
2765                         }
2766                         if (create == EXT4_CREATE_UNINITIALIZED_EXT)
2767                                 goto out;
2768                         if (!create) {
2769                                 /*
2770                                  * We have blocks reserved already.  We
2771                                  * return allocated blocks so that delalloc
2772                                  * won't do block reservation for us.  But
2773                                  * the buffer head will be unmapped so that
2774                                  * a read from the block returns 0s.
2775                                  */
2776                                 if (allocated > max_blocks)
2777                                         allocated = max_blocks;
2778                                 set_buffer_unwritten(bh_result);
2779                                 goto out2;
2780                         }
2781
2782                         ret = ext4_ext_convert_to_initialized(handle, inode,
2783                                                                 path, iblock,
2784                                                                 max_blocks);
2785                         if (ret <= 0) {
2786                                 err = ret;
2787                                 goto out2;
2788                         } else
2789                                 allocated = ret;
2790                         goto outnew;
2791                 }
2792         }
2793
2794         /*
2795          * requested block isn't allocated yet;
2796          * we must not allocate the block if the create flag is zero
2797          */
2798         if (!create) {
2799                 /*
2800                  * put the gap we just found into the cache to speed up
2801                  * subsequent requests
2802                  */
2803                 ext4_ext_put_gap_in_cache(inode, path, iblock);
2804                 goto out2;
2805         }
2806         /*
2807          * Okay, we need to do block allocation.
2808          */
2809
2810         /* find neighbour allocated blocks */
2811         ar.lleft = iblock;
2812         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
2813         if (err)
2814                 goto out2;
2815         ar.lright = iblock;
2816         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
2817         if (err)
2818                 goto out2;
2819
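        /*
         * The logical/physical neighbours found above are handed to the
         * multi-block allocator as hints, so the normalized request does
         * not overlap existing extents.
         */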
2820         /*
2821          * See if request is beyond maximum number of blocks we can have in
2822          * a single extent. For an initialized extent this limit is
2823          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
2824          * EXT_UNINIT_MAX_LEN.
2825          */
2826         if (max_blocks > EXT_INIT_MAX_LEN &&
2827             create != EXT4_CREATE_UNINITIALIZED_EXT)
2828                 max_blocks = EXT_INIT_MAX_LEN;
2829         else if (max_blocks > EXT_UNINIT_MAX_LEN &&
2830                  create == EXT4_CREATE_UNINITIALIZED_EXT)
2831                 max_blocks = EXT_UNINIT_MAX_LEN;
2832
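        /*
         * A non-zero return from ext4_ext_check_overlap() means the proposed
         * range overlaps the next extent; newex has already been trimmed, so
         * only the trimmed length is allocated instead of failing.
         */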
2833         /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
2834         newex.ee_block = cpu_to_le32(iblock);
2835         newex.ee_len = cpu_to_le16(max_blocks);
2836         err = ext4_ext_check_overlap(inode, &newex, path);
2837         if (err)
2838                 allocated = ext4_ext_get_actual_len(&newex);
2839         else
2840                 allocated = max_blocks;
2841
2842         /* allocate new block */
2843         ar.inode = inode;
2844         ar.goal = ext4_ext_find_goal(inode, path, iblock);
2845         ar.logical = iblock;
2846         ar.len = allocated;
2847         if (S_ISREG(inode->i_mode))
2848                 ar.flags = EXT4_MB_HINT_DATA;
2849         else
2850                 /* disable in-core preallocation for non-regular files */
2851                 ar.flags = 0;
2852         newblock = ext4_mb_new_blocks(handle, &ar, &err);
2853         if (!newblock)
2854                 goto out2;
2855         ext_debug("allocate new block: goal %llu, found %llu/%u\n",
2856                   ar.goal, newblock, allocated);
2857
2858         /* try to insert new extent into found leaf and return */
2859         ext4_ext_store_pblock(&newex, newblock);
2860         newex.ee_len = cpu_to_le16(ar.len);
2861         if (create == EXT4_CREATE_UNINITIALIZED_EXT)  /* Mark uninitialized */
2862                 ext4_ext_mark_uninitialized(&newex);
2863         err = ext4_ext_insert_extent(handle, inode, path, &newex);
2864         if (err) {
2865                 /* free data blocks we just allocated */
2866                 /* not a good idea to call discard here directly,
2867                  * but otherwise we'd need to call it on every free() */
2868                 ext4_discard_preallocations(inode);
2869                 ext4_free_blocks(handle, inode, ext_pblock(&newex),
2870                                         ext4_ext_get_actual_len(&newex), 0);
2871                 goto out2;
2872         }
2873
2874         /* the previous routine could have used the block we allocated */
2875         newblock = ext_pblock(&newex);
2876         allocated = ext4_ext_get_actual_len(&newex);
2877 outnew:
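        /*
         * If asked to, push i_disksize forward to cover the newly allocated
         * range, but never beyond the in-core i_size.
         */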
2878         if (extend_disksize) {
2879                 disksize = ((loff_t) iblock + ar.len) << inode->i_blkbits;
2880                 if (disksize > i_size_read(inode))
2881                         disksize = i_size_read(inode);
2882                 if (disksize > EXT4_I(inode)->i_disksize)
2883                         EXT4_I(inode)->i_disksize = disksize;
2884         }
2885
2886         set_buffer_new(bh_result);
2887
2888         /* Cache only when it is _not_ an uninitialized extent */
2889         if (create != EXT4_CREATE_UNINITIALIZED_EXT)
2890                 ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
2891                                                 EXT4_EXT_CACHE_EXTENT);
2892 out:
2893         if (allocated > max_blocks)
2894                 allocated = max_blocks;
2895         ext4_ext_show_leaf(inode, path);
2896         set_buffer_mapped(bh_result);
2897         bh_result->b_bdev = inode->i_sb->s_bdev;
2898         bh_result->b_blocknr = newblock;
2899 out2:
2900         if (path) {
2901                 ext4_ext_drop_refs(path);
2902                 kfree(path);
2903         }
2904         return err ? err : allocated;
2905 }
2906
2907 void ext4_ext_truncate(struct inode *inode)
2908 {
2909         struct address_space *mapping = inode->i_mapping;
2910         struct super_block *sb = inode->i_sb;
2911         ext4_lblk_t last_block;
2912         handle_t *handle;
2913         int err = 0;
2914
2915         /*
2916          * probably the first extent we are going to free will be the last in its block
2917          */
2918         err = ext4_writepage_trans_blocks(inode);
2919         handle = ext4_journal_start(inode, err);
2920         if (IS_ERR(handle))
2921                 return;
2922
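        /*
         * If the new size is not block aligned, zero the tail of the last
         * block so stale data past EOF is not exposed later.
         */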
2923         if (inode->i_size & (sb->s_blocksize - 1))
2924                 ext4_block_truncate_page(handle, mapping, inode->i_size);
2925
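        /*
         * Add the inode to the orphan list so that a truncate interrupted
         * by a crash is completed during the next mount.
         */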
2926         if (ext4_orphan_add(handle, inode))
2927                 goto out_stop;
2928
2929         down_write(&EXT4_I(inode)->i_data_sem);
2930         ext4_ext_invalidate_cache(inode);
2931
2932         ext4_discard_preallocations(inode);
2933
2934         /*
2935          * TODO: optimization is possible here.
2936          * Probably we need not scan at all,
2937          * because page truncation is enough.
2938          */
2939
2940         /* we have to know where to truncate from in case of a crash */
2941         EXT4_I(inode)->i_disksize = inode->i_size;
2942         ext4_mark_inode_dirty(handle, inode);
2943
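        /*
         * last_block is the first logical block past the new size; everything
         * from there to the end of the extent tree is removed.
         */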
2944         last_block = (inode->i_size + sb->s_blocksize - 1)
2945                         >> EXT4_BLOCK_SIZE_BITS(sb);
2946         err = ext4_ext_remove_space(inode, last_block);
2947
2948         /* In a multi-transaction truncate, we only make the final
2949          * transaction synchronous.
2950          */
2951         if (IS_SYNC(inode))
2952                 ext4_handle_sync(handle);
2953
2954 out_stop:
2955         up_write(&EXT4_I(inode)->i_data_sem);
2956         /*
2957          * If this was a simple ftruncate() and the file will remain alive,
2958          * then we need to clear up the orphan record which we created above.
2959          * However, if this was a real unlink then we were called by
2960          * ext4_delete_inode(), and we allow that function to clean up the
2961          * orphan info for us.
2962          */
2963         if (inode->i_nlink)
2964                 ext4_orphan_del(handle, inode);
2965
2966         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2967         ext4_mark_inode_dirty(handle, inode);
2968         ext4_journal_stop(handle);
2969 }
2970
2971 static void ext4_falloc_update_inode(struct inode *inode,
2972                                 int mode, loff_t new_size, int update_ctime)
2973 {
2974         struct timespec now;
2975
2976         if (update_ctime) {
2977                 now = current_fs_time(inode->i_sb);
2978                 if (!timespec_equal(&inode->i_ctime, &now))
2979                         inode->i_ctime = now;
2980         }
2981         /*
2982          * Update only when preallocation was requested beyond
2983          * the file size.
2984          */
2985         if (!(mode & FALLOC_FL_KEEP_SIZE)) {
2986                 if (new_size > i_size_read(inode))
2987                         i_size_write(inode, new_size);
2988                 if (new_size > EXT4_I(inode)->i_disksize)
2989                         ext4_update_i_disksize(inode, new_size);
2990         }
2991
2992 }
2993
2994 /*
2995  * Preallocate space for a file. This implements ext4's fallocate inode
2996  * operation, which gets called from the sys_fallocate system call.
2997  * For block-mapped files, posix_fallocate should fall back to writing
2998  * zeroes to the required new blocks (the same behavior that is expected
2999  * of file systems which do not support the fallocate() system call).
3000  */
3001 long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3002 {
3003         handle_t *handle;
3004         ext4_lblk_t block;
3005         loff_t new_size;
3006         unsigned int max_blocks;
3007         int ret = 0;
3008         int ret2 = 0;
3009         int retries = 0;
3010         struct buffer_head map_bh;
3011         unsigned int credits, blkbits = inode->i_blkbits;
3012
3013         /*
3014          * currently supporting (pre)allocate mode for extent-based
3015          * files _only_
3016          */
3017         if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3018                 return -EOPNOTSUPP;
3019
3020         /* preallocation to directories is currently not supported */
3021         if (S_ISDIR(inode->i_mode))
3022                 return -ENODEV;
3023
3024         block = offset >> blkbits;
3025         /*
3026          * We can't just convert len to max_blocks because the offset may not
3027          * be block aligned: blocksize = 4096, offset = 3072, len = 2048 spans two blocks
3028          */
3029         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3030                                                         - block;
3031         /*
3032          * credits to insert 1 extent into extent tree
3033          */
3034         credits = ext4_chunk_trans_blocks(inode, max_blocks);
3035         mutex_lock(&inode->i_mutex);
3036 retry:
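        /*
         * Map the range one chunk at a time: each pass asks for the remaining
         * blocks and advances by however many the allocator actually mapped.
         */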
3037         while (ret >= 0 && ret < max_blocks) {
3038                 block = block + ret;
3039                 max_blocks = max_blocks - ret;
3040                 handle = ext4_journal_start(inode, credits);
3041                 if (IS_ERR(handle)) {
3042                         ret = PTR_ERR(handle);
3043                         break;
3044                 }
3045                 ret = ext4_get_blocks_wrap(handle, inode, block,
3046                                           max_blocks, &map_bh,
3047                                           EXT4_CREATE_UNINITIALIZED_EXT, 0, 0);
3048                 if (ret <= 0) {
3049 #ifdef EXT4FS_DEBUG
3050                         WARN_ON(ret <= 0);
3051                         printk(KERN_ERR "%s: ext4_ext_get_blocks "
3052                                     "returned error inode#%lu, block=%u, "
3053                                     "max_blocks=%u\n", __func__,
3054                                     inode->i_ino, block, max_blocks);
3055 #endif
3056                         ext4_mark_inode_dirty(handle, inode);
3057                         ret2 = ext4_journal_stop(handle);
3058                         break;
3059                 }
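                /*
                 * Work out how far the file now extends: the full offset+len
                 * once the last chunk is mapped, otherwise the end of the
                 * blocks mapped so far.
                 */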
3060                 if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
3061                                                 blkbits) >> blkbits))
3062                         new_size = offset + len;
3063                 else
3064                         new_size = ((loff_t) block + ret) << blkbits;
3065
3066                 ext4_falloc_update_inode(inode, mode, new_size,
3067                                                 buffer_new(&map_bh));
3068                 ext4_mark_inode_dirty(handle, inode);
3069                 ret2 = ext4_journal_stop(handle);
3070                 if (ret2)
3071                         break;
3072         }
3073         if (ret == -ENOSPC &&
3074                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
3075                 ret = 0;
3076                 goto retry;
3077         }
3078         mutex_unlock(&inode->i_mutex);
3079         return ret > 0 ? ret2 : ret;
3080 }
3081
3082 /*
3083  * Callback function called for each extent to gather FIEMAP information.
3084  */
3085 static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3086                        struct ext4_ext_cache *newex, struct ext4_extent *ex,
3087                        void *data)
3088 {
3089         struct fiemap_extent_info *fieinfo = data;
3090         unsigned long blksize_bits = inode->i_sb->s_blocksize_bits;
3091         __u64   logical;
3092         __u64   physical;
3093         __u64   length;
3094         __u32   flags = 0;
3095         int     error;
3096
3097         logical =  (__u64)newex->ec_block << blksize_bits;
3098
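        /*
         * A cached "gap" may still be delayed-allocation data that exists
         * only in the page cache; report such ranges as DELALLOC and skip
         * genuine holes.
         */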
3099         if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
3100                 pgoff_t offset;
3101                 struct page *page;
3102                 struct buffer_head *bh = NULL;
3103
3104                 offset = logical >> PAGE_SHIFT;
3105                 page = find_get_page(inode->i_mapping, offset);
3106                 if (!page || !page_has_buffers(page))
3107                         return EXT_CONTINUE;
3108
3109                 bh = page_buffers(page);
3110
3111                 if (!bh)
3112                         return EXT_CONTINUE;
3113
3114                 if (buffer_delay(bh)) {
3115                         flags |= FIEMAP_EXTENT_DELALLOC;
3116                         page_cache_release(page);
3117                 } else {
3118                         page_cache_release(page);
3119                         return EXT_CONTINUE;
3120                 }
3121         }
3122
3123         physical = (__u64)newex->ec_start << blksize_bits;
3124         length =   (__u64)newex->ec_len << blksize_bits;
3125
3126         if (ex && ext4_ext_is_uninitialized(ex))
3127                 flags |= FIEMAP_EXTENT_UNWRITTEN;
3128
3129         /*
3130          * If this extent reaches EXT_MAX_BLOCK, it must be last.
3131          *
3132          * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
3133          * this also indicates no more allocated blocks.
3134          *
3135          * XXX this might miss a single-block extent at EXT_MAX_BLOCK
3136          */
3137         if (logical + length - 1 == EXT_MAX_BLOCK ||
3138             ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK)
3139                 flags |= FIEMAP_EXTENT_LAST;
3140
3141         error = fiemap_fill_next_extent(fieinfo, logical, physical,
3142                                         length, flags);
3143         if (error < 0)
3144                 return error;
3145         if (error == 1)
3146                 return EXT_BREAK;
3147
3148         return EXT_CONTINUE;
3149 }
3150
3151 /* The fiemap flags we can handle are specified here */
3152 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
3153
3154 static int ext4_xattr_fiemap(struct inode *inode,
3155                                 struct fiemap_extent_info *fieinfo)
3156 {
3157         __u64 physical = 0;
3158         __u64 length;
3159         __u32 flags = FIEMAP_EXTENT_LAST;
3160         int blockbits = inode->i_sb->s_blocksize_bits;
3161         int error = 0;
3162
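        /*
         * Extended attributes live either in the unused tail of the inode
         * itself or in a single external block; report whichever applies
         * as one FIEMAP extent.
         */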
3163         /* in-inode? */
3164         if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
3165                 struct ext4_iloc iloc;
3166                 int offset;     /* offset of xattr in inode */
3167
3168                 error = ext4_get_inode_loc(inode, &iloc);
3169                 if (error)
3170                         return error;
3171                 physical = iloc.bh->b_blocknr << blockbits;
3172                 offset = EXT4_GOOD_OLD_INODE_SIZE +
3173                                 EXT4_I(inode)->i_extra_isize;
3174                 physical += offset;
3175                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
3176                 flags |= FIEMAP_EXTENT_DATA_INLINE;
3177         } else { /* external block */
3178                 physical = EXT4_I(inode)->i_file_acl << blockbits;
3179                 length = inode->i_sb->s_blocksize;
3180         }
3181
3182         if (physical)
3183                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
3184                                                 length, flags);
3185         return (error < 0 ? error : 0);
3186 }
3187
3188 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3189                 __u64 start, __u64 len)
3190 {
3191         ext4_lblk_t start_blk;
3192         ext4_lblk_t len_blks;
3193         int error = 0;
3194
3195         /* fall back to the generic implementation if not in extents format */
3196         if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
3197                 return generic_block_fiemap(inode, fieinfo, start, len,
3198                         ext4_get_block);
3199
3200         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
3201                 return -EBADR;
3202
3203         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
3204                 error = ext4_xattr_fiemap(inode, fieinfo);
3205         } else {
3206                 start_blk = start >> inode->i_sb->s_blocksize_bits;
3207                 len_blks = len >> inode->i_sb->s_blocksize_bits;
3208
3209                 /*
3210                  * Walk the extent tree gathering extent information.
3211                  * ext4_ext_fiemap_cb will push extents back to user.
3212                  */
3213                 down_write(&EXT4_I(inode)->i_data_sem);
3214                 error = ext4_ext_walk_space(inode, start_blk, len_blks,
3215                                           ext4_ext_fiemap_cb, fieinfo);
3216                 up_write(&EXT4_I(inode)->i_data_sem);
3217         }
3218
3219         return error;
3220 }
3221