/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *      (sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include "xattr.h"
#include "acl.h"

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT4_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext4 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
                        struct buffer_head *bh, ext4_fsblk_t blocknr)
{
        int err;

        might_sleep();

        BUFFER_TRACE(bh, "enter");

        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
                  "data mode %lx\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));

        /* Never use the revoke function if we are doing full data
         * journaling: there is no need to, and a V1 superblock won't
         * support it.  Otherwise, only skip the revoke on un-journaled
         * data blocks. */

        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
            (!is_metadata && !ext4_should_journal_data(inode))) {
                if (bh) {
                        BUFFER_TRACE(bh, "call jbd2_journal_forget");
                        return ext4_journal_forget(handle, bh);
                }
                return 0;
        }

        /*
         * data!=journal && (is_metadata || should_journal_data(inode))
         */
        BUFFER_TRACE(bh, "call ext4_journal_revoke");
        err = ext4_journal_revoke(handle, blocknr, bh);
        if (err)
                ext4_abort(inode->i_sb, __FUNCTION__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
}
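
/*
 * Typical call site (a sketch, not a survey of every caller): when
 * truncate frees an indirect block it passes is_metadata == 1 so the
 * block is revoked in the journal, e.g.
 *
 *        ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
 */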

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
        ext4_lblk_t needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext4 to try to delete it.  Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT4_MAX_TRANS_DATA)
                needed = EXT4_MAX_TRANS_DATA;

        return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
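
/*
 * Example (a sketch, assuming 4KB blocks, i.e. s_blocksize_bits == 12):
 * an inode with i_blocks == 80 (in 512-byte units) gives
 * needed = 80 >> 3 == 10, so the next truncate chunk reserves
 * EXT4_DATA_TRANS_BLOCKS(sb) + 10 journal credits.
 */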

/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext4_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext4_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, and the transaction must be restarted, we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
                return 0;
        if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
{
        jbd_debug(2, "restarting handle %p\n", handle);
        return ext4_journal_restart(handle, blocks_for_truncate(inode));
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_delete_inode(struct inode *inode)
{
        handle_t *handle;

        truncate_inode_pages(&inode->i_data, 0);

        if (is_bad_inode(inode))
                goto no_delete;

        handle = start_transaction(inode);
        if (IS_ERR(handle)) {
                /*
                 * If we're going to skip the normal cleanup, we still need to
                 * make sure that the in-core orphan linked list is properly
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                handle->h_sync = 1;
        inode->i_size = 0;
        if (inode->i_blocks)
                ext4_truncate(inode);
        /*
         * Kill off the orphan record which ext4_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext4_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext4_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext4_orphan_del(handle, inode);
        EXT4_I(inode)->i_dtime  = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext4_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                clear_inode(inode);
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
        return;
no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
}

typedef struct {
        __le32  *p;
        __le32  key;
        struct buffer_head *bh;
} Indirect;

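/*
 * add_chain() records one step of the indirect walk: @v points at the
 * slot holding the little-endian block number and @bh is the buffer
 * hosting that slot (NULL when the slot lives in the inode's i_data).
 */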
static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

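/*
 * verify_chain() re-checks that every sampled slot in [from, to] still
 * holds the key we read earlier; a mismatch means a concurrent truncate
 * or another get_block changed the chain under us.
 */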
static int verify_chain(Indirect *from, Indirect *to)
{
        while (from <= to && from->key == *from->p)
                from++;
        return (from > to);
}

/**
 *      ext4_block_to_path - parse the block number into array of offsets
 *      @inode: inode in question (we are only interested in its superblock)
 *      @i_block: block number to be parsed
 *      @offsets: array to store the offsets in
 *      @boundary: set this non-zero if the referred-to block is likely to be
 *             followed (on disk) by an indirect block.
 *
 *      To store the locations of a file's data ext4 uses a data structure
 *      common to UNIX filesystems - a tree of pointers anchored in the
 *      inode, with data blocks at the leaves and indirect blocks in the
 *      intermediate nodes.  This function translates the block number into
 *      a path in that tree - the return value is the path length and
 *      @offsets[n] is the offset of the pointer to the (n+1)th node in the
 *      nth one.  If @i_block is out of range (negative or too large) a
 *      warning is printed and zero returned.
 *
 *      Note: function doesn't find node addresses, so no IO is needed. All
 *      we need to know is the capacity of indirect blocks (taken from the
 *      inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */

static int ext4_block_to_path(struct inode *inode,
                        ext4_lblk_t i_block,
                        ext4_lblk_t offsets[4], int *boundary)
{
        int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT4_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT4_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT4_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT4_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext4_warning(inode->i_sb, "ext4_block_to_path",
                                "block %u > max",
                                i_block + direct_blocks +
                                indirect_blocks + double_blocks);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));
        return n;
}
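
/*
 * Worked example (a sketch, assuming 4KB blocks, i.e. 1024 pointers per
 * indirect block): for i_block == 11 the function fills
 * offsets[] = { 11 } and returns a depth of 1 (a direct block); for
 * i_block == 12 it fills offsets[] = { EXT4_IND_BLOCK, 0 } and returns
 * 2 - the first slot of the single indirect block.
 */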

/**
 *      ext4_get_branch - read the chain of indirect blocks leading to data
 *      @inode: inode in question
 *      @depth: depth of the chain (1 - direct pointer, etc.)
 *      @offsets: offsets of pointers in inode/indirect blocks
 *      @chain: place to store the result
 *      @err: here we store the error value
 *
 *      Function fills the array of triples <key, p, bh> and returns %NULL
 *      if everything went OK or the pointer to the last filled triple
 *      (incomplete one) otherwise. Upon the return chain[i].key contains
 *      the number of (i+1)-th block in the chain (as it is stored in memory,
 *      i.e. little-endian 32-bit), chain[i].p contains the address of that
 *      number (it points into struct inode for i==0 and into the bh->b_data
 *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 *      block for i>0 and NULL for i==0. In other words, it holds the block
 *      numbers of the chain, addresses they were taken from (and where we can
 *      verify that chain did not change) and buffer_heads hosting these
 *      numbers.
 *
 *      Function stops when it stumbles upon zero pointer (absent block)
 *              (pointer to last triple returned, *@err == 0)
 *      or when it gets an IO error reading an indirect block
 *              (ditto, *@err == -EIO)
 *      or when it notices that chain had been changed while it was reading
 *              (ditto, *@err == -EAGAIN)
 *      or when it reads all @depth-1 indirect blocks successfully and finds
 *      the whole chain, all way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext4_get_branch(struct inode *inode, int depth,
                                 ext4_lblk_t  *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                /* Reader: pointers */
                if (!verify_chain(chain, p))
                        goto changed;
                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

changed:
        brelse(bh);
        *err = -EAGAIN;
        goto no_block;
failure:
        *err = -EIO;
no_block:
        return p;
}

/**
 *      ext4_find_near - find a place for allocation with sufficient locality
 *      @inode: owner
 *      @ind: descriptor of indirect block.
 *
 *      This function returns the preferred place for block allocation.
 *      It is used when the heuristic for sequential allocation fails.
 *      Rules are:
 *        + if there is a block to the left of our position - allocate near it.
 *        + if pointer will live in indirect block - allocate near that block.
 *        + if pointer will live in inode - allocate in the same
 *          cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group.   The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 *      Caller must make sure that @ind is valid and will stay that way.
 */
static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
{
        struct ext4_inode_info *ei = EXT4_I(inode);
        __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext4_fsblk_t bg_start;
        ext4_grpblk_t colour;

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--) {
                if (*p)
                        return le32_to_cpu(*p);
        }

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * It is going to be referred to from the inode itself? OK, just put it
         * into the same cylinder group then.
         */
        bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
        colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
}
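
/*
 * Example of the colouring (a sketch, assuming 32768 blocks per group):
 * a caller with PID 1234 gets colour (1234 % 16) * (32768 / 16) == 4096,
 * so its allocations start 4096 blocks into the group, away from blocks
 * coloured for concurrent allocators with other PIDs.
 */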

/**
 *      ext4_find_goal - find a preferred place for allocation.
 *      @inode: owner
 *      @block:  block we want
 *      @chain:  chain of indirect blocks
 *      @partial: pointer to the last triple within a chain
 *
 *      Returns the preferred place for block allocation.
 */
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
                Indirect chain[4], Indirect *partial)
{
        struct ext4_block_alloc_info *block_i;

        block_i = EXT4_I(inode)->i_block_alloc_info;

        /*
         * try the heuristic for sequential allocation,
         * failing that at least try to get decent locality.
         */
        if (block_i && (block == block_i->last_alloc_logical_block + 1)
                && (block_i->last_alloc_physical_block != 0)) {
                return block_i->last_alloc_physical_block + 1;
        }

        return ext4_find_near(inode, partial);
}

/**
 *      ext4_blks_to_allocate: Look up the block map and count the number
 *      of direct blocks that need to be allocated for the given branch.
 *
 *      @branch: chain of indirect blocks
 *      @k: number of blocks needed for indirect blocks
 *      @blks: number of data blocks to be mapped.
 *      @blocks_to_boundary:  the offset in the indirect block
 *
 *      return the total number of blocks to be allocated, including the
 *      direct and indirect blocks.
 */
static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
                int blocks_to_boundary)
{
        unsigned long count = 0;

        /*
         * Simple case: if the [t,d]indirect block(s) have not been allocated
         * yet, then clearly no blocks on that path have been allocated either.
         */
        if (k > 0) {
                /* right now we don't handle cross boundary allocation */
                if (blks < blocks_to_boundary + 1)
                        count += blks;
                else
                        count += blocks_to_boundary + 1;
                return count;
        }

        count++;
        while (count < blks && count <= blocks_to_boundary &&
                le32_to_cpu(*(branch[0].p + count)) == 0) {
                count++;
        }
        return count;
}
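
/*
 * Example (a sketch): with k > 0, blks == 8 and blocks_to_boundary == 3,
 * the function returns 4 - we stop at the indirect-block boundary rather
 * than allocate across it.
 */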

/**
 *      ext4_alloc_blocks: allocate the multiple blocks needed for a branch
 *      @indirect_blks: the number of blocks we need to allocate for indirect
 *                      blocks
 *
 *      @new_blocks: on return it will store the new block numbers for
 *      the indirect blocks (if needed) and the first direct block,
 *      @blks:  on return it will store the total number of allocated
 *              direct blocks
 */
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                        ext4_fsblk_t goal, int indirect_blks, int blks,
                        ext4_fsblk_t new_blocks[4], int *err)
{
        int target, i;
        unsigned long count = 0;
        int index = 0;
        ext4_fsblk_t current_block = 0;
        int ret = 0;

        /*
         * Here we try to allocate the requested multiple blocks at once,
         * on a best-effort basis.
         * To build a branch, we should allocate blocks for
         * the indirect blocks (if not allocated yet), and at least
         * the first direct block of this branch.  That's the
         * minimum number of blocks we need to allocate (required).
         */
        target = blks + indirect_blks;

        while (1) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
                current_block = ext4_new_blocks(handle, inode, goal, &count, err);
                if (*err)
                        goto failed_out;

                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
                        count--;
                }

                if (count > 0)
                        break;
        }

        /* save the new block number for the first direct block */
        new_blocks[index] = current_block;

        /* total number of blocks allocated for direct blocks */
        ret = count;
        *err = 0;
        return ret;
failed_out:
        for (i = 0; i < index; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1);
        return ret;
}
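
/*
 * Example run (a sketch): for indirect_blks == 2 and blks == 4, target
 * starts at 6.  If ext4_new_blocks() hands back a run of 3 blocks, the
 * first two become the indirect blocks, new_blocks[2] records the third
 * as the first direct block, and the function returns 1 (one direct
 * block mapped).
 */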

/**
 *      ext4_alloc_branch - allocate and set up a chain of blocks.
 *      @inode: owner
 *      @indirect_blks: number of allocated indirect blocks
 *      @blks: number of allocated direct blocks
 *      @offsets: offsets (in the blocks) to store the pointers to next.
 *      @branch: place to store the chain in.
 *
 *      This function allocates blocks, zeroes out all but the last one,
 *      links them into chain and (if we are synchronous) writes them to disk.
 *      In other words, it prepares a branch that can be spliced onto the
 *      inode. It stores the information about that chain in the branch[], in
 *      the same format as ext4_get_branch() would do. We are calling it after
 *      we had read the existing part of chain and partial points to the last
 *      triple of that (one with zero ->key). Upon the exit we have the same
 *      picture as after the successful ext4_get_block(), except that in one
 *      place chain is disconnected - *branch->p is still zero (we did not
 *      set the last link), but branch->key contains the number that should
 *      be placed into *branch->p to fill that gap.
 *
 *      If allocation fails we free all blocks we've allocated (and forget
 *      their buffer_heads) and return the error value from the failed
 *      ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 *      as described above and return 0.
 */
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                        int indirect_blks, int *blks, ext4_fsblk_t goal,
                        ext4_lblk_t *offsets, Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
        int err = 0;
        struct buffer_head *bh;
        int num;
        ext4_fsblk_t new_blocks[4];
        ext4_fsblk_t current_block;

        num = ext4_alloc_blocks(handle, inode, goal, indirect_blks,
                                *blks, new_blocks, &err);
        if (err)
                return err;

        branch[0].key = cpu_to_le32(new_blocks[0]);
        /*
         * metadata blocks and data blocks are allocated.
         */
        for (n = 1; n <= indirect_blks; n++) {
                /*
                 * Get buffer_head for parent block, zero it out
                 * and set the pointer to new one, then send
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                branch[n].bh = bh;
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext4_journal_get_create_access(handle, bh);
                if (err) {
                        unlock_buffer(bh);
                        brelse(bh);
                        goto failed;
                }

                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /*
                         * End of chain, update the last new metablock of
                         * the chain to point to the newly allocated
                         * data block numbers
                         */
                        for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                BUFFER_TRACE(bh, "marking uptodate");
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
                err = ext4_journal_dirty_metadata(handle, bh);
                if (err)
                        goto failed;
        }
        *blks = num;
        return err;
failed:
        /* Allocation failed, free what we already allocated */
        for (i = 1; i <= n; i++) {
                BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
                ext4_journal_forget(handle, branch[i].bh);
        }
        for (i = 0; i < indirect_blks; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1);

        ext4_free_blocks(handle, inode, new_blocks[i], num);

        return err;
}

/**
 * ext4_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *      ext4_alloc_branch)
 * @where: location of missing link
 * @num:   number of indirect blocks we are adding
 * @blks:  number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext4_splice_branch(handle_t *handle, struct inode *inode,
                        ext4_lblk_t block, Indirect *where, int num, int blks)
{
        int i;
        int err = 0;
        struct ext4_block_alloc_info *block_i;
        ext4_fsblk_t current_block;

        block_i = EXT4_I(inode)->i_block_alloc_info;
        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
         * before the splice.
         */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, where->bh);
                if (err)
                        goto err_out;
        }
        /* That's it */

        *where->p = where->key;

        /*
         * Update the host buffer_head or inode to point to the just-allocated
         * direct blocks
         */
        if (num == 0 && blks > 1) {
                current_block = le32_to_cpu(where->key) + 1;
                for (i = 1; i < blks; i++)
                        *(where->p + i) = cpu_to_le32(current_block++);
        }

        /*
         * update the most recently allocated logical & physical block
         * in i_block_alloc_info, to assist in finding the proper goal block
         * for the next allocation
         */
        if (block_i) {
                block_i->last_alloc_logical_block = block + blks - 1;
                block_i->last_alloc_physical_block =
                                le32_to_cpu(where[num].key) + blks - 1;
        }

        /* We are done with atomic stuff, now do the rest of housekeeping */

        inode->i_ctime = ext4_current_time(inode);
        ext4_mark_inode_dirty(handle, inode);

        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
                 * If we spliced it onto an indirect block, we haven't
                 * altered the inode.  Note however that if it is being spliced
                 * onto an indirect block at the very end of the file (the
                 * file is growing) then we *will* alter the inode to reflect
                 * the new i_size.  But that is not done here - it is done in
                 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
                err = ext4_journal_dirty_metadata(handle, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 * Inode was dirtied above.
                 */
                jbd_debug(5, "splicing direct\n");
        }
        return err;

err_out:
        for (i = 1; i <= num; i++) {
                BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
                ext4_journal_forget(handle, where[i].bh);
                ext4_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
        }
        ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);

        return err;
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
                ext4_lblk_t iblock, unsigned long maxblocks,
                struct buffer_head *bh_result,
                int create, int extend_disksize)
{
        int err = -EIO;
        ext4_lblk_t offsets[4];
        Indirect chain[4];
        Indirect *partial;
        ext4_fsblk_t goal;
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int count = 0;
        ext4_fsblk_t first_block = 0;


        J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
        J_ASSERT(handle != NULL || create == 0);
        depth = ext4_block_to_path(inode, iblock, offsets,
                                        &blocks_to_boundary);

        if (depth == 0)
                goto out;

        partial = ext4_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                first_block = le32_to_cpu(chain[depth - 1].key);
                clear_buffer_new(bh_result);
                count++;
                /* map more blocks */
                while (count < maxblocks && count <= blocks_to_boundary) {
                        ext4_fsblk_t blk;

                        if (!verify_chain(chain, partial)) {
                                /*
                                 * Indirect block might be removed by
                                 * truncate while we were reading it.
                                 * Handling of that case: forget what we've
                                 * got now. Flag the err as EAGAIN, so it
                                 * will reread.
                                 */
                                err = -EAGAIN;
                                count = 0;
                                break;
                        }
                        blk = le32_to_cpu(*(chain[depth-1].p + count));

                        if (blk == first_block + count)
                                count++;
                        else
                                break;
                }
                if (err != -EAGAIN)
                        goto got_it;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO)
                goto cleanup;

        mutex_lock(&ei->truncate_mutex);

        /*
         * If the indirect block is missing while we are reading
         * the chain (ext4_get_branch() returns -EAGAIN err), or
         * if the chain has been changed after we grab the semaphore,
         * (either because another process truncated this branch, or
         * another get_block allocated this branch) re-grab the chain to see if
         * the request block has been allocated or not.
         *
         * Since we already block the truncate/other get_block
         * at this point, we will have the current copy of the chain when we
         * splice the branch into the tree.
         */
        if (err == -EAGAIN || !verify_chain(chain, partial)) {
                while (partial > chain) {
                        brelse(partial->bh);
                        partial--;
                }
                partial = ext4_get_branch(inode, depth, offsets, chain, &err);
                if (!partial) {
                        count++;
                        mutex_unlock(&ei->truncate_mutex);
                        if (err)
                                goto cleanup;
                        clear_buffer_new(bh_result);
                        goto got_it;
                }
        }

        /*
         * Okay, we need to do block allocation.  Lazily initialize the block
         * allocation info here if necessary.
         */
        if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
                ext4_init_block_alloc_info(inode);

        goal = ext4_find_goal(inode, iblock, chain, partial);

        /* the number of blocks we need to allocate for [d,t]indirect blocks */
        indirect_blks = (chain + depth) - partial - 1;

        /*
         * Next look up the indirect map to count the total number of
         * direct blocks to allocate for this branch.
         */
        count = ext4_blks_to_allocate(partial, indirect_blks,
                                        maxblocks, blocks_to_boundary);
        /*
         * Block out ext4_truncate while we alter the tree
         */
        err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal,
                                offsets + (partial - chain), partial);

        /*
         * The ext4_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned.  Can we handle this somehow?  We
         * may need to return -EAGAIN upwards in the worst case.  --sct
         */
        if (!err)
                err = ext4_splice_branch(handle, inode, iblock,
                                        partial, indirect_blks, count);
        /*
         * i_disksize growing is protected by truncate_mutex.  Don't forget to
         * protect it if you're about to implement concurrent
         * ext4_get_block() -bzzz
         */
        if (!err && extend_disksize && inode->i_size > ei->i_disksize)
                ei->i_disksize = inode->i_size;
        mutex_unlock(&ei->truncate_mutex);
        if (err)
                goto cleanup;

        set_buffer_new(bh_result);
got_it:
        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
        if (count > blocks_to_boundary)
                set_buffer_boundary(bh_result);
        err = count;
        /* Clean up and exit */
        partial = chain + depth - 1;    /* the whole chain */
cleanup:
        while (partial > chain) {
                BUFFER_TRACE(partial->bh, "call brelse");
                brelse(partial->bh);
                partial--;
        }
        BUFFER_TRACE(bh_result, "returned");
out:
        return err;
}

#define DIO_CREDITS (EXT4_RESERVE_TRANS_BLOCKS + 32)
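
/*
 * A rough reading of DIO_CREDITS (an interpretation, not from the
 * original comments): EXT4_RESERVE_TRANS_BLOCKS covers the metadata
 * updates of a single allocation, and the extra 32 credits give a large
 * direct-io write headroom to map a batch of blocks before the handle
 * must be extended or restarted.
 */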

static int ext4_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        handle_t *handle = ext4_journal_current_handle();
        int ret = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

        if (!create)
                goto get_block;         /* A read */

        if (max_blocks == 1)
                goto get_block;         /* A single block get */

        if (handle->h_transaction->t_state == T_LOCKED) {
                /*
                 * Huge direct-io writes can hold off commits for long
                 * periods of time.  Let this commit run.
                 */
                ext4_journal_stop(handle);
                handle = ext4_journal_start(inode, DIO_CREDITS);
                if (IS_ERR(handle))
                        ret = PTR_ERR(handle);
                goto get_block;
        }

        if (handle->h_buffer_credits <= EXT4_RESERVE_TRANS_BLOCKS) {
                /*
                 * Getting low on buffer credits...
                 */
                ret = ext4_journal_extend(handle, DIO_CREDITS);
                if (ret > 0) {
                        /*
                         * Couldn't extend the transaction.  Start a new one.
                         */
                        ret = ext4_journal_restart(handle, DIO_CREDITS);
                }
        }

get_block:
        if (ret == 0) {
                ret = ext4_get_blocks_wrap(handle, inode, iblock,
                                        max_blocks, bh_result, create, 0);
                if (ret > 0) {
                        bh_result->b_size = (ret << inode->i_blkbits);
                        ret = 0;
                }
        }
        return ret;
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int create, int *errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;

        J_ASSERT(handle != NULL || create == 0);

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        err = ext4_get_blocks_wrap(handle, inode, block, 1,
                                        &dummy, create, 1);
        /*
         * ext4_get_blocks_handle() returns the number of blocks
         * mapped; 0 in case of a HOLE.
         */
        if (err > 0) {
                if (err > 1)
                        WARN_ON(1);
                err = 0;
        }
        *errp = err;
        if (!err && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (!bh) {
                        *errp = -EIO;
                        goto err;
                }
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != NULL);

                        /*
                         * Now that we do not always journal data, we should
                         * keep in mind whether this should always journal the
                         * new buffer as metadata.  For now, regular file
                         * writes use ext4_get_block instead, so it's not a
                         * problem.
                         */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext4_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
                                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
                        err = ext4_journal_dirty_metadata(handle, bh);
                        if (!fatal)
                                fatal = err;
                } else {
                        BUFFER_TRACE(bh, "not a new buffer");
                }
                if (fatal) {
                        *errp = fatal;
                        brelse(bh);
                        bh = NULL;
                }
                return bh;
        }
err:
        return NULL;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                               ext4_lblk_t block, int create, int *err)
{
        struct buffer_head *bh;

        bh = ext4_getblk(handle, inode, block, create, err);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                return bh;
        ll_rw_block(READ_META, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        put_bh(bh);
        *err = -EIO;
        return NULL;
}
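
/*
 * Typical use of ext4_bread() (a sketch): directory code reads a logical
 * block of an inode and gets back an uptodate buffer_head, e.g.
 *
 *        bh = ext4_bread(handle, dir, 0, 0, &err);
 *
 * NULL is returned on a hole (*err == 0) or on an I/O error (*err < 0).
 */

/*
 * walk_page_buffers() below applies @fn to each buffer of the page that
 * overlaps the byte range [@from, @to), stopping at the first error and
 * returning it.  *@partial is set if some buffer outside the range is
 * not uptodate.
 */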

static int walk_page_buffers(   handle_t *handle,
                                struct buffer_head *head,
                                unsigned from,
                                unsigned to,
                                int *partial,
                                int (*fn)(      handle_t *handle,
                                                struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
                block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}
1121
1122 /*
1123  * To preserve ordering, it is essential that the hole instantiation and
1124  * the data write be encapsulated in a single transaction.  We cannot
1125  * close off a transaction and start a new one between the ext4_get_block()
1126  * and the commit_write().  So doing the jbd2_journal_start at the start of
1127  * prepare_write() is the right place.
1128  *
1129  * Also, this function can nest inside ext4_writepage() ->
1130  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1131  * has generated enough buffer credits to do the whole page.  So we won't
1132  * block on the journal in that case, which is good, because the caller may
1133  * be PF_MEMALLOC.
1134  *
1135  * By accident, ext4 can be reentered when a transaction is open via
1136  * quota file writes.  If we were to commit the transaction while thus
1137  * reentered, there can be a deadlock - we would be holding a quota
1138  * lock, and the commit would never complete if another thread had a
1139  * transaction open and was blocking on the quota lock - a ranking
1140  * violation.
1141  *
1142  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1143  * will _not_ run commit under these circumstances because handle->h_ref
1144  * is elevated.  We'll still have enough credits for the tiny quotafile
1145  * write.
1146  */
1147 static int do_journal_get_write_access(handle_t *handle,
1148                                         struct buffer_head *bh)
1149 {
1150         if (!buffer_mapped(bh) || buffer_freed(bh))
1151                 return 0;
1152         return ext4_journal_get_write_access(handle, bh);
1153 }
1154
1155 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1156                                 loff_t pos, unsigned len, unsigned flags,
1157                                 struct page **pagep, void **fsdata)
1158 {
1159         struct inode *inode = mapping->host;
1160         int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1161         handle_t *handle;
1162         int retries = 0;
1163         struct page *page;
1164         pgoff_t index;
1165         unsigned from, to;
1166
1167         index = pos >> PAGE_CACHE_SHIFT;
1168         from = pos & (PAGE_CACHE_SIZE - 1);
1169         to = from + len;
1170
1171 retry:
1172         page = __grab_cache_page(mapping, index);
1173         if (!page)
1174                 return -ENOMEM;
1175         *pagep = page;
1176
1177         handle = ext4_journal_start(inode, needed_blocks);
1178         if (IS_ERR(handle)) {
1179                 unlock_page(page);
1180                 page_cache_release(page);
1181                 ret = PTR_ERR(handle);
1182                 goto out;
1183         }
1184
1185         ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1186                                                         ext4_get_block);
1187
1188         if (!ret && ext4_should_journal_data(inode)) {
1189                 ret = walk_page_buffers(handle, page_buffers(page),
1190                                 from, to, NULL, do_journal_get_write_access);
1191         }
1192
1193         if (ret) {
1194                 ext4_journal_stop(handle);
1195                 unlock_page(page);
1196                 page_cache_release(page);
1197         }
1198
1199         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1200                 goto retry;
1201 out:
1202         return ret;
1203 }
1204
1205 int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1206 {
1207         int err = jbd2_journal_dirty_data(handle, bh);
1208         if (err)
1209                 ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
1210                                                 bh, handle, err);
1211         return err;
1212 }
1213
1214 /* For write_end() in data=journal mode */
1215 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1216 {
1217         if (!buffer_mapped(bh) || buffer_freed(bh))
1218                 return 0;
1219         set_buffer_uptodate(bh);
1220         return ext4_journal_dirty_metadata(handle, bh);
1221 }
1222
1223 /*
1224  * Generic write_end handler for ordered and writeback ext4 journal modes.
1225  * We can't use generic_write_end, because that unlocks the page and we need to
1226  * unlock the page after ext4_journal_stop, but ext4_journal_stop must run
1227  * after block_write_end.
1228  */
1229 static int ext4_generic_write_end(struct file *file,
1230                                 struct address_space *mapping,
1231                                 loff_t pos, unsigned len, unsigned copied,
1232                                 struct page *page, void *fsdata)
1233 {
1234         struct inode *inode = file->f_mapping->host;
1235
1236         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1237
1238         if (pos+copied > inode->i_size) {
1239                 i_size_write(inode, pos+copied);
1240                 mark_inode_dirty(inode);
1241         }
1242
1243         return copied;
1244 }
1245
1246 /*
1247  * We need to pick up the new inode size which generic_commit_write gave us
1248  * `file' can be NULL - eg, when called from page_symlink().
1249  *
1250  * ext4 never places buffers on inode->i_mapping->private_list.  metadata
1251  * buffers are managed internally.
1252  */
1253 static int ext4_ordered_write_end(struct file *file,
1254                                 struct address_space *mapping,
1255                                 loff_t pos, unsigned len, unsigned copied,
1256                                 struct page *page, void *fsdata)
1257 {
1258         handle_t *handle = ext4_journal_current_handle();
1259         struct inode *inode = file->f_mapping->host;
1260         unsigned from, to;
1261         int ret = 0, ret2;
1262
1263         from = pos & (PAGE_CACHE_SIZE - 1);
1264         to = from + len;
1265
1266         ret = walk_page_buffers(handle, page_buffers(page),
1267                 from, to, NULL, ext4_journal_dirty_data);
1268
1269         if (ret == 0) {
1270                 /*
1271                  * generic_write_end() will run mark_inode_dirty() if i_size
1272                  * changes.  So let's piggyback the i_disksize mark_inode_dirty
1273                  * into that.
1274                  */
1275                 loff_t new_i_size;
1276
1277                 new_i_size = pos + copied;
1278                 if (new_i_size > EXT4_I(inode)->i_disksize)
1279                         EXT4_I(inode)->i_disksize = new_i_size;
1280                 copied = ext4_generic_write_end(file, mapping, pos, len, copied,
1281                                                         page, fsdata);
1282                 if (copied < 0)
1283                         ret = copied;
1284         }
1285         ret2 = ext4_journal_stop(handle);
1286         if (!ret)
1287                 ret = ret2;
1288         unlock_page(page);
1289         page_cache_release(page);
1290
1291         return ret ? ret : copied;
1292 }
1293
1294 static int ext4_writeback_write_end(struct file *file,
1295                                 struct address_space *mapping,
1296                                 loff_t pos, unsigned len, unsigned copied,
1297                                 struct page *page, void *fsdata)
1298 {
1299         handle_t *handle = ext4_journal_current_handle();
1300         struct inode *inode = file->f_mapping->host;
1301         int ret = 0, ret2;
1302         loff_t new_i_size;
1303
1304         new_i_size = pos + copied;
1305         if (new_i_size > EXT4_I(inode)->i_disksize)
1306                 EXT4_I(inode)->i_disksize = new_i_size;
1307
1308         copied = ext4_generic_write_end(file, mapping, pos, len, copied,
1309                                                         page, fsdata);
1310         if (copied < 0)
1311                 ret = copied;
1312
1313         ret2 = ext4_journal_stop(handle);
1314         if (!ret)
1315                 ret = ret2;
1316         unlock_page(page);
1317         page_cache_release(page);
1318
1319         return ret ? ret : copied;
1320 }
1321
1322 static int ext4_journalled_write_end(struct file *file,
1323                                 struct address_space *mapping,
1324                                 loff_t pos, unsigned len, unsigned copied,
1325                                 struct page *page, void *fsdata)
1326 {
1327         handle_t *handle = ext4_journal_current_handle();
1328         struct inode *inode = mapping->host;
1329         int ret = 0, ret2;
1330         int partial = 0;
1331         unsigned from, to;
1332
1333         from = pos & (PAGE_CACHE_SIZE - 1);
1334         to = from + len;
1335
1336         if (copied < len) {
1337                 if (!PageUptodate(page))
1338                         copied = 0;
1339                 page_zero_new_buffers(page, from+copied, to);
1340         }
1341
1342         ret = walk_page_buffers(handle, page_buffers(page), from,
1343                                 to, &partial, write_end_fn);
1344         if (!partial)
1345                 SetPageUptodate(page);
1346         if (pos+copied > inode->i_size)
1347                 i_size_write(inode, pos+copied);
1348         EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1349         if (inode->i_size > EXT4_I(inode)->i_disksize) {
1350                 EXT4_I(inode)->i_disksize = inode->i_size;
1351                 ret2 = ext4_mark_inode_dirty(handle, inode);
1352                 if (!ret)
1353                         ret = ret2;
1354         }
1355
1356         ret2 = ext4_journal_stop(handle);
1357         if (!ret)
1358                 ret = ret2;
1359         unlock_page(page);
1360         page_cache_release(page);
1361
1362         return ret ? ret : copied;
1363 }
1364
1365 /*
1366  * bmap() is special.  It gets used by applications such as lilo and by
1367  * the swapper to find the on-disk block of a specific piece of data.
1368  *
1369  * Naturally, this is dangerous if the block concerned is still in the
1370  * journal.  If somebody makes a swapfile on an ext4 data-journaling
1371  * filesystem and enables swap, then they may get a nasty shock when the
1372  * data getting swapped to that swapfile suddenly gets overwritten by
1373  * the original zero's written out previously to the journal and
1374  * awaiting writeback in the kernel's buffer cache.
1375  *
1376  * So, if we see any bmap calls here on a modified, data-journaled file,
1377  * take extra steps to flush any blocks which might be in the cache.
1378  */
1379 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
1380 {
1381         struct inode *inode = mapping->host;
1382         journal_t *journal;
1383         int err;
1384
1385         if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
1386                 /*
1387                  * This is a REALLY heavyweight approach, but the use of
1388                  * bmap on dirty files is expected to be extremely rare:
1389                  * only if we run lilo or swapon on a freshly made file
1390                  * do we expect this to happen.
1391                  *
1392                  * (bmap requires CAP_SYS_RAWIO so this does not
1393                  * represent an unprivileged user DOS attack --- we'd be
1394                  * in trouble if mortal users could trigger this path at
1395                  * will.)
1396                  *
1397                  * NB. EXT4_STATE_JDATA is not set on files other than
1398                  * regular files.  If somebody wants to bmap a directory
1399                  * or symlink and gets confused because the buffer
1400                  * hasn't yet been flushed to disk, they deserve
1401                  * everything they get.
1402                  */
1403
1404                 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
1405                 journal = EXT4_JOURNAL(inode);
1406                 jbd2_journal_lock_updates(journal);
1407                 err = jbd2_journal_flush(journal);
1408                 jbd2_journal_unlock_updates(journal);
1409
1410                 if (err)
1411                         return 0;
1412         }
1413
1414         return generic_block_bmap(mapping, block, ext4_get_block);
1415 }
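/*
 * Userspace reaches this via the FIBMAP ioctl (CAP_SYS_RAWIO required).
 * A minimal sketch, error handling omitted:
 *
 *	int blk = 0;			(logical block in)
 *	ioctl(fd, FIBMAP, &blk);	(physical block out)
 *
 * The VFS routes FIBMAP to ->bmap(), which lands here for ext4.
 */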
1416
1417 static int bget_one(handle_t *handle, struct buffer_head *bh)
1418 {
1419         get_bh(bh);
1420         return 0;
1421 }
1422
1423 static int bput_one(handle_t *handle, struct buffer_head *bh)
1424 {
1425         put_bh(bh);
1426         return 0;
1427 }
1428
1429 static int jbd2_journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1430 {
1431         if (buffer_mapped(bh))
1432                 return ext4_journal_dirty_data(handle, bh);
1433         return 0;
1434 }
1435
1436 /*
1437  * Note that we always start a transaction even if we're not journalling
1438  * data.  This is to preserve ordering: any hole instantiation within
1439  * __block_write_full_page -> ext4_get_block() should be journalled
1440  * along with the data so we don't crash and then get metadata which
1441  * refers to old data.
1442  *
1443  * In all journalling modes block_write_full_page() will start the I/O.
1444  *
1445  * Problem:
1446  *
1447  *      ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1448  *              ext4_writepage()
1449  *
1450  * Similar for:
1451  *
1452  *      ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1453  *
1454  * Same applies to ext4_get_block().  We will deadlock on various things like
1455  * lock_journal and i_truncate_mutex.
1456  *
1457  * Setting PF_MEMALLOC here doesn't work - too many internal memory
1458  * allocations fail.
1459  *
1460  * 16May01: If we're reentered then journal_current_handle() will be
1461  *          non-zero. We simply *return*.
1462  *
1463  * 1 July 2001: @@@ FIXME:
1464  *   In journalled data mode, a data buffer may be metadata against the
1465  *   current transaction.  But the same file is part of a shared mapping
1466  *   and someone does a writepage() on it.
1467  *
1468  *   We will move the buffer onto the async_data list, but *after* it has
1469  *   been dirtied. So there's a small window where we have dirty data on
1470  *   BJ_Metadata.
1471  *
1472  *   Note that this only applies to the last partial page in the file: the
1473  *   part for which block_write_full_page() uses prepare/commit.  (That's
1474  *   broken code anyway: it's wrong for msync()).
1475  *
1476  *   It's a rare case: affects the final partial page, for journalled data
1477  *   where the file is subject to both write() and writepage() in the same
1478  *   transaction.  To fix it we'll need a custom block_write_full_page().
1479  *   We'll probably need that anyway for journalling writepage() output.
1480  *
1481  * We don't honour synchronous mounts for writepage().  That would be
1482  * disastrous.  Any write() or metadata operation will sync the fs for
1483  * us.
1484  *
1485  * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1486  * we don't need to open a transaction here.
1487  */
1488 static int ext4_ordered_writepage(struct page *page,
1489                                 struct writeback_control *wbc)
1490 {
1491         struct inode *inode = page->mapping->host;
1492         struct buffer_head *page_bufs;
1493         handle_t *handle = NULL;
1494         int ret = 0;
1495         int err;
1496
1497         J_ASSERT(PageLocked(page));
1498
1499         /*
1500          * We give up here if we're reentered, because it might be for a
1501          * different filesystem.
1502          */
1503         if (ext4_journal_current_handle())
1504                 goto out_fail;
1505
1506         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1507
1508         if (IS_ERR(handle)) {
1509                 ret = PTR_ERR(handle);
1510                 goto out_fail;
1511         }
1512
1513         if (!page_has_buffers(page)) {
1514                 create_empty_buffers(page, inode->i_sb->s_blocksize,
1515                                 (1 << BH_Dirty)|(1 << BH_Uptodate));
1516         }
1517         page_bufs = page_buffers(page);
1518         walk_page_buffers(handle, page_bufs, 0,
1519                         PAGE_CACHE_SIZE, NULL, bget_one);
1520
1521         ret = block_write_full_page(page, ext4_get_block, wbc);
1522
1523         /*
1524          * The page can become unlocked at any point now, and
1525          * truncate can then come in and change things.  So we
1526          * can't touch *page from now on.  But *page_bufs is
1527          * safe due to elevated refcount.
1528          */
1529
1530         /*
1531          * And attach them to the current transaction.  But only if
1532          * block_write_full_page() succeeded.  Otherwise they are unmapped,
1533          * and generally junk.
1534          */
1535         if (ret == 0) {
1536                 err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1537                                         NULL, jbd2_journal_dirty_data_fn);
1538                 if (!ret)
1539                         ret = err;
1540         }
1541         walk_page_buffers(handle, page_bufs, 0,
1542                         PAGE_CACHE_SIZE, NULL, bput_one);
1543         err = ext4_journal_stop(handle);
1544         if (!ret)
1545                 ret = err;
1546         return ret;
1547
1548 out_fail:
1549         redirty_page_for_writepage(wbc, page);
1550         unlock_page(page);
1551         return ret;
1552 }
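/*
 * A summary of the ordered-mode dance above: bget_one() pins each buffer
 * so it outlives the page lock, block_write_full_page() submits the I/O,
 * jbd2_journal_dirty_data_fn() files every mapped buffer on the
 * transaction's ordered-data list, and bput_one() finally drops the pin.
 */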
1553
1554 static int ext4_writeback_writepage(struct page *page,
1555                                 struct writeback_control *wbc)
1556 {
1557         struct inode *inode = page->mapping->host;
1558         handle_t *handle = NULL;
1559         int ret = 0;
1560         int err;
1561
1562         if (ext4_journal_current_handle())
1563                 goto out_fail;
1564
1565         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1566         if (IS_ERR(handle)) {
1567                 ret = PTR_ERR(handle);
1568                 goto out_fail;
1569         }
1570
1571         if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
1572                 ret = nobh_writepage(page, ext4_get_block, wbc);
1573         else
1574                 ret = block_write_full_page(page, ext4_get_block, wbc);
1575
1576         err = ext4_journal_stop(handle);
1577         if (!ret)
1578                 ret = err;
1579         return ret;
1580
1581 out_fail:
1582         redirty_page_for_writepage(wbc, page);
1583         unlock_page(page);
1584         return ret;
1585 }
1586
1587 static int ext4_journalled_writepage(struct page *page,
1588                                 struct writeback_control *wbc)
1589 {
1590         struct inode *inode = page->mapping->host;
1591         handle_t *handle = NULL;
1592         int ret = 0;
1593         int err;
1594
1595         if (ext4_journal_current_handle())
1596                 goto no_write;
1597
1598         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1599         if (IS_ERR(handle)) {
1600                 ret = PTR_ERR(handle);
1601                 goto no_write;
1602         }
1603
1604         if (!page_has_buffers(page) || PageChecked(page)) {
1605                 /*
1606                  * It's mmapped pagecache.  Add buffers and journal it.  There
1607                  * doesn't seem much point in redirtying the page here.
1608                  */
1609                 ClearPageChecked(page);
1610                 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1611                                         ext4_get_block);
1612                 if (ret != 0) {
1613                         ext4_journal_stop(handle);
1614                         goto out_unlock;
1615                 }
1616                 ret = walk_page_buffers(handle, page_buffers(page), 0,
1617                         PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1618
1619                 err = walk_page_buffers(handle, page_buffers(page), 0,
1620                                 PAGE_CACHE_SIZE, NULL, write_end_fn);
1621                 if (ret == 0)
1622                         ret = err;
1623                 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1624                 unlock_page(page);
1625         } else {
1626                 /*
1627                  * It may be a page full of checkpoint-mode buffers.  We don't
1628                  * really know unless we go poke around in the buffer_heads.
1629                  * But block_write_full_page will do the right thing.
1630                  */
1631                 ret = block_write_full_page(page, ext4_get_block, wbc);
1632         }
1633         err = ext4_journal_stop(handle);
1634         if (!ret)
1635                 ret = err;
1636 out:
1637         return ret;
1638
1639 no_write:
1640         redirty_page_for_writepage(wbc, page);
1641 out_unlock:
1642         unlock_page(page);
1643         goto out;
1644 }
1645
1646 static int ext4_readpage(struct file *file, struct page *page)
1647 {
1648         return mpage_readpage(page, ext4_get_block);
1649 }
1650
1651 static int
1652 ext4_readpages(struct file *file, struct address_space *mapping,
1653                 struct list_head *pages, unsigned nr_pages)
1654 {
1655         return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
1656 }
1657
1658 static void ext4_invalidatepage(struct page *page, unsigned long offset)
1659 {
1660         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1661
1662         /*
1663          * If it's a full truncate we just forget about the pending dirtying
1664          */
1665         if (offset == 0)
1666                 ClearPageChecked(page);
1667
1668         jbd2_journal_invalidatepage(journal, page, offset);
1669 }
1670
1671 static int ext4_releasepage(struct page *page, gfp_t wait)
1672 {
1673         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1674
1675         WARN_ON(PageChecked(page));
1676         if (!page_has_buffers(page))
1677                 return 0;
1678         return jbd2_journal_try_to_free_buffers(journal, page, wait);
1679 }
1680
1681 /*
1682  * If the O_DIRECT write will extend the file then add this inode to the
1683  * orphan list.  So recovery will truncate it back to the original size
1684  * if the machine crashes during the write.
1685  *
1686  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1687  * crashes then stale disk data _may_ be exposed inside the file.
1688  */
1689 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1690                         const struct iovec *iov, loff_t offset,
1691                         unsigned long nr_segs)
1692 {
1693         struct file *file = iocb->ki_filp;
1694         struct inode *inode = file->f_mapping->host;
1695         struct ext4_inode_info *ei = EXT4_I(inode);
1696         handle_t *handle = NULL;
1697         ssize_t ret;
1698         int orphan = 0;
1699         size_t count = iov_length(iov, nr_segs);
1700
1701         if (rw == WRITE) {
1702                 loff_t final_size = offset + count;
1703
1704                 handle = ext4_journal_start(inode, DIO_CREDITS);
1705                 if (IS_ERR(handle)) {
1706                         ret = PTR_ERR(handle);
1707                         goto out;
1708                 }
1709                 if (final_size > inode->i_size) {
1710                         ret = ext4_orphan_add(handle, inode);
1711                         if (ret)
1712                                 goto out_stop;
1713                         orphan = 1;
1714                         ei->i_disksize = inode->i_size;
1715                 }
1716         }
1717
1718         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1719                                  offset, nr_segs,
1720                                  ext4_get_block, NULL);
1721
1722         /*
1723          * Reacquire the handle: ext4_get_block() can restart the transaction
1724          */
1725         handle = ext4_journal_current_handle();
1726
1727 out_stop:
1728         if (handle) {
1729                 int err;
1730
1731                 if (orphan && inode->i_nlink)
1732                         ext4_orphan_del(handle, inode);
1733                 if (orphan && ret > 0) {
1734                         loff_t end = offset + ret;
1735                         if (end > inode->i_size) {
1736                                 ei->i_disksize = end;
1737                                 i_size_write(inode, end);
1738                                 /*
1739                                  * We're going to return a positive `ret'
1740                                  * here due to non-zero-length I/O, so there's
1741                                  * no way of reporting error returns from
1742                                  * ext4_mark_inode_dirty() to userspace.  So
1743                                  * ignore it.
1744                                  */
1745                                 ext4_mark_inode_dirty(handle, inode);
1746                         }
1747                 }
1748                 err = ext4_journal_stop(handle);
1749                 if (ret == 0)
1750                         ret = err;
1751         }
1752 out:
1753         return ret;
1754 }
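/*
 * A sketch of the crash protection above for an extending O_DIRECT
 * write: start handle -> ext4_orphan_add() -> blockdev_direct_IO() ->
 * ext4_orphan_del() and, on success, push i_size/i_disksize out to
 * offset + ret -> stop handle.  A crash between the add and the del
 * leaves the inode on the orphan list, and recovery truncates it back
 * to the old size instead of exposing uninitialized blocks.
 */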
1755
1756 /*
1757  * Pages can be marked dirty completely asynchronously from ext4's journalling
1758  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1759  * much here because ->set_page_dirty is called under VFS locks.  The page is
1760  * not necessarily locked.
1761  *
1762  * We cannot just dirty the page and leave attached buffers clean, because the
1763  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1764  * or jbddirty because all the journalling code will explode.
1765  *
1766  * So what we do is to mark the page "pending dirty" and next time writepage
1767  * is called, propagate that into the buffers appropriately.
1768  */
1769 static int ext4_journalled_set_page_dirty(struct page *page)
1770 {
1771         SetPageChecked(page);
1772         return __set_page_dirty_nobuffers(page);
1773 }
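/*
 * PageChecked lifecycle in data=journal mode: it is set here when the
 * page is dirtied behind our back, ext4_journalled_writepage() sees it,
 * journals the buffers and clears it, and ext4_invalidatepage() clears
 * it on a full truncate so that the pending dirty is simply forgotten.
 */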
1774
1775 static const struct address_space_operations ext4_ordered_aops = {
1776         .readpage       = ext4_readpage,
1777         .readpages      = ext4_readpages,
1778         .writepage      = ext4_ordered_writepage,
1779         .sync_page      = block_sync_page,
1780         .write_begin    = ext4_write_begin,
1781         .write_end      = ext4_ordered_write_end,
1782         .bmap           = ext4_bmap,
1783         .invalidatepage = ext4_invalidatepage,
1784         .releasepage    = ext4_releasepage,
1785         .direct_IO      = ext4_direct_IO,
1786         .migratepage    = buffer_migrate_page,
1787 };
1788
1789 static const struct address_space_operations ext4_writeback_aops = {
1790         .readpage       = ext4_readpage,
1791         .readpages      = ext4_readpages,
1792         .writepage      = ext4_writeback_writepage,
1793         .sync_page      = block_sync_page,
1794         .write_begin    = ext4_write_begin,
1795         .write_end      = ext4_writeback_write_end,
1796         .bmap           = ext4_bmap,
1797         .invalidatepage = ext4_invalidatepage,
1798         .releasepage    = ext4_releasepage,
1799         .direct_IO      = ext4_direct_IO,
1800         .migratepage    = buffer_migrate_page,
1801 };
1802
1803 static const struct address_space_operations ext4_journalled_aops = {
1804         .readpage       = ext4_readpage,
1805         .readpages      = ext4_readpages,
1806         .writepage      = ext4_journalled_writepage,
1807         .sync_page      = block_sync_page,
1808         .write_begin    = ext4_write_begin,
1809         .write_end      = ext4_journalled_write_end,
1810         .set_page_dirty = ext4_journalled_set_page_dirty,
1811         .bmap           = ext4_bmap,
1812         .invalidatepage = ext4_invalidatepage,
1813         .releasepage    = ext4_releasepage,
1814 };
1815
1816 void ext4_set_aops(struct inode *inode)
1817 {
1818         if (ext4_should_order_data(inode))
1819                 inode->i_mapping->a_ops = &ext4_ordered_aops;
1820         else if (ext4_should_writeback_data(inode))
1821                 inode->i_mapping->a_ops = &ext4_writeback_aops;
1822         else
1823                 inode->i_mapping->a_ops = &ext4_journalled_aops;
1824 }
1825
1826 /*
1827  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
1828  * up to the end of the block which corresponds to `from'.
1829  * This is required during truncate. We need to physically zero the tail end
1830  * of that block so it doesn't yield old data if the file is later grown.
1831  */
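/*
 * A worked example, assuming 4K pages and a 1K block size: from == 5000
 * lands in page 1 at offset == 5000 & 4095 == 904, and length ==
 * 1024 - (904 & 1023) == 120, so the 120 bytes from `from' to the end
 * of its 1K block are zeroed.
 */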
1832 int ext4_block_truncate_page(handle_t *handle, struct page *page,
1833                 struct address_space *mapping, loff_t from)
1834 {
1835         ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1836         unsigned offset = from & (PAGE_CACHE_SIZE-1);
1837         unsigned blocksize, length, pos;
1838         ext4_lblk_t iblock;
1839         struct inode *inode = mapping->host;
1840         struct buffer_head *bh;
1841         int err = 0;
1842
1843         blocksize = inode->i_sb->s_blocksize;
1844         length = blocksize - (offset & (blocksize - 1));
1845         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1846
1847         /*
1848          * For "nobh" option,  we can only work if we don't need to
1849          * read-in the page - otherwise we create buffers to do the IO.
1850          */
1851         if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1852              ext4_should_writeback_data(inode) && PageUptodate(page)) {
1853                 zero_user_page(page, offset, length, KM_USER0);
1854                 set_page_dirty(page);
1855                 goto unlock;
1856         }
1857
1858         if (!page_has_buffers(page))
1859                 create_empty_buffers(page, blocksize, 0);
1860
1861         /* Find the buffer that contains "offset" */
1862         bh = page_buffers(page);
1863         pos = blocksize;
1864         while (offset >= pos) {
1865                 bh = bh->b_this_page;
1866                 iblock++;
1867                 pos += blocksize;
1868         }
1869
1870         err = 0;
1871         if (buffer_freed(bh)) {
1872                 BUFFER_TRACE(bh, "freed: skip");
1873                 goto unlock;
1874         }
1875
1876         if (!buffer_mapped(bh)) {
1877                 BUFFER_TRACE(bh, "unmapped");
1878                 ext4_get_block(inode, iblock, bh, 0);
1879                 /* unmapped? It's a hole - nothing to do */
1880                 if (!buffer_mapped(bh)) {
1881                         BUFFER_TRACE(bh, "still unmapped");
1882                         goto unlock;
1883                 }
1884         }
1885
1886         /* Ok, it's mapped. Make sure it's up-to-date */
1887         if (PageUptodate(page))
1888                 set_buffer_uptodate(bh);
1889
1890         if (!buffer_uptodate(bh)) {
1891                 err = -EIO;
1892                 ll_rw_block(READ, 1, &bh);
1893                 wait_on_buffer(bh);
1894                 /* Uhhuh. Read error. Complain and punt. */
1895                 if (!buffer_uptodate(bh))
1896                         goto unlock;
1897         }
1898
1899         if (ext4_should_journal_data(inode)) {
1900                 BUFFER_TRACE(bh, "get write access");
1901                 err = ext4_journal_get_write_access(handle, bh);
1902                 if (err)
1903                         goto unlock;
1904         }
1905
1906         zero_user_page(page, offset, length, KM_USER0);
1907
1908         BUFFER_TRACE(bh, "zeroed end of block");
1909
1910         err = 0;
1911         if (ext4_should_journal_data(inode)) {
1912                 err = ext4_journal_dirty_metadata(handle, bh);
1913         } else {
1914                 if (ext4_should_order_data(inode))
1915                         err = ext4_journal_dirty_data(handle, bh);
1916                 mark_buffer_dirty(bh);
1917         }
1918
1919 unlock:
1920         unlock_page(page);
1921         page_cache_release(page);
1922         return err;
1923 }
1924
1925 /*
1926  * Probably it should be a library function... search for first non-zero word
1927  * or memcmp with zero_page, whatever is better for particular architecture.
1928  * Linus?
1929  */
1930 static inline int all_zeroes(__le32 *p, __le32 *q)
1931 {
1932         while (p < q)
1933                 if (*p++)
1934                         return 0;
1935         return 1;
1936 }
1937
1938 /**
1939  *      ext4_find_shared - find the indirect blocks for partial truncation.
1940  *      @inode:   inode in question
1941  *      @depth:   depth of the affected branch
1942  *      @offsets: offsets of pointers in that branch (see ext4_block_to_path)
1943  *      @chain:   place to store the pointers to partial indirect blocks
1944  *      @top:     place to the (detached) top of branch
1945  *
1946  *      This is a helper function used by ext4_truncate().
1947  *
1948  *      When we do truncate() we may have to clean the ends of several
1949  *      indirect blocks but leave the blocks themselves alive. Block is
1950  *      partially truncated if some data below the new i_size is referenced
1951  *      from it (and it is on the path to the first completely truncated
1952  *      data block, indeed).  We have to free the top of that path along
1953  *      with everything to the right of the path. Since no allocation
1954  *      past the truncation point is possible until ext4_truncate()
1955  *      finishes, we may safely do the latter, but top of branch may
1956  *      require special attention - pageout below the truncation point
1957  *      might try to populate it.
1958  *
1959  *      We atomically detach the top of branch from the tree, store the
1960  *      block number of its root in *@top, pointers to buffer_heads of
1961  *      partially truncated blocks - in @chain[].bh and pointers to
1962  *      their last elements that should not be removed - in
1963  *      @chain[].p. Return value is the pointer to last filled element
1964  *      of @chain.
1965  *
1966  *      The work left to caller to do the actual freeing of subtrees:
1967  *              a) free the subtree starting from *@top
1968  *              b) free the subtrees whose roots are stored in
1969  *                      (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1970  *              c) free the subtrees growing from the inode past the @chain[0].
1971  *                      (no partially truncated stuff there).  */
1972
1973 static Indirect *ext4_find_shared(struct inode *inode, int depth,
1974                         ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
1975 {
1976         Indirect *partial, *p;
1977         int k, err;
1978
1979         *top = 0;
1980         /* Make k index the deepest non-null offset + 1 */
1981         for (k = depth; k > 1 && !offsets[k-1]; k--)
1982                 ;
1983         partial = ext4_get_branch(inode, k, offsets, chain, &err);
1984         /* Writer: pointers */
1985         if (!partial)
1986                 partial = chain + k-1;
1987         /*
1988          * If the branch acquired continuation since we've looked at it -
1989          * fine, it should all survive and (new) top doesn't belong to us.
1990          */
1991         if (!partial->key && *partial->p)
1992                 /* Writer: end */
1993                 goto no_top;
1994         for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
1995                 ;
1996         /*
1997          * OK, we've found the last block that must survive. The rest of our
1998          * branch should be detached before unlocking. However, if that rest
1999          * of branch is all ours and does not grow immediately from the inode
2000          * it's easier to cheat and just decrement partial->p.
2001          */
2002         if (p == chain + k - 1 && p > chain) {
2003                 p->p--;
2004         } else {
2005                 *top = *p->p;
2006                 /* Nope, don't do this in ext4.  Must leave the tree intact */
2007 #if 0
2008                 *p->p = 0;
2009 #endif
2010         }
2011         /* Writer: end */
2012
2013         while (partial > p) {
2014                 brelse(partial->bh);
2015                 partial--;
2016         }
2017 no_top:
2018         return partial;
2019 }
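/*
 * A concrete example: when the new last block lives in the double-
 * indirect tree, ext4_block_to_path() yields depth == 3 and offsets ==
 * { EXT4_DIND_BLOCK, i1, i2 }, so chain[] ends up holding the inode
 * slot, the DIND block and an IND block; *top is set only when a whole
 * subtree above the cut becomes unreferenced.
 */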
2020
2021 /*
2022  * Zero a number of block pointers in either an inode or an indirect block.
2023  * If we restart the transaction we must again get write access to the
2024  * indirect block for further modification.
2025  *
2026  * We release `count' blocks on disk, but (last - first) may be greater
2027  * than `count' because there can be holes in there.
2028  */
2029 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
2030                 struct buffer_head *bh, ext4_fsblk_t block_to_free,
2031                 unsigned long count, __le32 *first, __le32 *last)
2032 {
2033         __le32 *p;
2034         if (try_to_extend_transaction(handle, inode)) {
2035                 if (bh) {
2036                         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2037                         ext4_journal_dirty_metadata(handle, bh);
2038                 }
2039                 ext4_mark_inode_dirty(handle, inode);
2040                 ext4_journal_test_restart(handle, inode);
2041                 if (bh) {
2042                         BUFFER_TRACE(bh, "retaking write access");
2043                         ext4_journal_get_write_access(handle, bh);
2044                 }
2045         }
2046
2047         /*
2048          * Any buffers which are on the journal will be in memory. We find
2049          * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
2050          * on them.  We've already detached each block from the file, so
2051          * bforget() in jbd2_journal_forget() should be safe.
2052          *
2053          * AKPM: turn on bforget in jbd2_journal_forget()!!!
2054          */
2055         for (p = first; p < last; p++) {
2056                 u32 nr = le32_to_cpu(*p);
2057                 if (nr) {
2058                         struct buffer_head *tbh;
2059
2060                         *p = 0;
2061                         tbh = sb_find_get_block(inode->i_sb, nr);
2062                         ext4_forget(handle, 0, inode, tbh, nr);
2063                 }
2064         }
2065
2066         ext4_free_blocks(handle, inode, block_to_free, count);
2067 }
2068
2069 /**
2070  * ext4_free_data - free a list of data blocks
2071  * @handle:     handle for this transaction
2072  * @inode:      inode we are dealing with
2073  * @this_bh:    indirect buffer_head which contains *@first and *@last
2074  * @first:      array of block numbers
2075  * @last:       points immediately past the end of array
2076  *
2077  * We are freeing all blocks referenced from that array (numbers are stored as
2078  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2079  *
2080  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2081  * blocks are contiguous then releasing them at one time will only affect one
2082  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2083  * actually use a lot of journal space.
2084  *
2085  * @this_bh will be %NULL if @first and @last point into the inode's direct
2086  * block pointers.
2087  */
2088 static void ext4_free_data(handle_t *handle, struct inode *inode,
2089                            struct buffer_head *this_bh,
2090                            __le32 *first, __le32 *last)
2091 {
2092         ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2093         unsigned long count = 0;            /* Number of blocks in the run */
2094         __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
2095                                                corresponding to
2096                                                block_to_free */
2097         ext4_fsblk_t nr;                    /* Current block # */
2098         __le32 *p;                          /* Pointer into inode/ind
2099                                                for current block */
2100         int err;
2101
2102         if (this_bh) {                          /* For indirect block */
2103                 BUFFER_TRACE(this_bh, "get_write_access");
2104                 err = ext4_journal_get_write_access(handle, this_bh);
2105                 /* Important: if we can't update the indirect pointers
2106                  * to the blocks, we can't free them. */
2107                 if (err)
2108                         return;
2109         }
2110
2111         for (p = first; p < last; p++) {
2112                 nr = le32_to_cpu(*p);
2113                 if (nr) {
2114                         /* accumulate blocks to free if they're contiguous */
2115                         if (count == 0) {
2116                                 block_to_free = nr;
2117                                 block_to_free_p = p;
2118                                 count = 1;
2119                         } else if (nr == block_to_free + count) {
2120                                 count++;
2121                         } else {
2122                                 ext4_clear_blocks(handle, inode, this_bh,
2123                                                   block_to_free,
2124                                                   count, block_to_free_p, p);
2125                                 block_to_free = nr;
2126                                 block_to_free_p = p;
2127                                 count = 1;
2128                         }
2129                 }
2130         }
2131
2132         if (count > 0)
2133                 ext4_clear_blocks(handle, inode, this_bh, block_to_free,
2134                                   count, block_to_free_p, p);
2135
2136         if (this_bh) {
2137                 BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
2138                 ext4_journal_dirty_metadata(handle, this_bh);
2139         }
2140 }
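/*
 * Example of the run accumulation above: for pointers {100, 101, 102,
 * 0, 200} we issue one ext4_clear_blocks() call for the run starting
 * at block 100 with count == 3 (the hole is skipped), and the trailing
 * "count > 0" case issues a second call for the single block 200.
 */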
2141
2142 /**
2143  *      ext4_free_branches - free an array of branches
2144  *      @handle: JBD handle for this transaction
2145  *      @inode: inode we are dealing with
2146  *      @parent_bh: the buffer_head which contains *@first and *@last
2147  *      @first: array of block numbers
2148  *      @last:  pointer immediately past the end of array
2149  *      @depth: depth of the branches to free
2150  *
2151  *      We are freeing all blocks referenced from these branches (numbers are
2152  *      stored as little-endian 32-bit) and updating @inode->i_blocks
2153  *      appropriately.
2154  */
2155 static void ext4_free_branches(handle_t *handle, struct inode *inode,
2156                                struct buffer_head *parent_bh,
2157                                __le32 *first, __le32 *last, int depth)
2158 {
2159         ext4_fsblk_t nr;
2160         __le32 *p;
2161
2162         if (is_handle_aborted(handle))
2163                 return;
2164
2165         if (depth--) {
2166                 struct buffer_head *bh;
2167                 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2168                 p = last;
2169                 while (--p >= first) {
2170                         nr = le32_to_cpu(*p);
2171                         if (!nr)
2172                                 continue;               /* A hole */
2173
2174                         /* Go read the buffer for the next level down */
2175                         bh = sb_bread(inode->i_sb, nr);
2176
2177                         /*
2178                          * A read failure? Report error and clear slot
2179                          * (should be rare).
2180                          */
2181                         if (!bh) {
2182                                 ext4_error(inode->i_sb, "ext4_free_branches",
2183                                            "Read failure, inode=%lu, block=%llu",
2184                                            inode->i_ino, nr);
2185                                 continue;
2186                         }
2187
2188                         /* This zaps the entire block.  Bottom up. */
2189                         BUFFER_TRACE(bh, "free child branches");
2190                         ext4_free_branches(handle, inode, bh,
2191                                            (__le32*)bh->b_data,
2192                                            (__le32*)bh->b_data + addr_per_block,
2193                                            depth);
2194
2195                         /*
2196                          * We've probably journalled the indirect block several
2197                          * times during the truncate.  But it's no longer
2198                          * needed and we now drop it from the transaction via
2199                          * jbd2_journal_revoke().
2200                          *
2201                          * That's easy if it's exclusively part of this
2202                          * transaction.  But if it's part of the committing
2203                          * transaction then jbd2_journal_forget() will simply
2204                          * brelse() it.  That means that if the underlying
2205                          * block is reallocated in ext4_get_block(),
2206                          * unmap_underlying_metadata() will find this block
2207                          * and will try to get rid of it.  damn, damn.
2208                          *
2209                          * If this block has already been committed to the
2210                          * journal, a revoke record will be written.  And
2211                          * revoke records must be emitted *before* clearing
2212                          * this block's bit in the bitmaps.
2213                          */
2214                         ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
2215
2216                         /*
2217                          * Everything below this pointer has been
2218                          * released.  Now let this top-of-subtree go.
2219                          *
2220                          * We want the freeing of this indirect block to be
2221                          * atomic in the journal with the updating of the
2222                          * bitmap block which owns it.  So make some room in
2223                          * the journal.
2224                          *
2225                          * We zero the parent pointer *after* freeing its
2226                          * pointee in the bitmaps, so if extend_transaction()
2227                          * for some reason fails to put the bitmap changes and
2228                          * the release into the same transaction, recovery
2229                          * will merely complain about releasing a free block,
2230                          * rather than leaking blocks.
2231                          */
2232                         if (is_handle_aborted(handle))
2233                                 return;
2234                         if (try_to_extend_transaction(handle, inode)) {
2235                                 ext4_mark_inode_dirty(handle, inode);
2236                                 ext4_journal_test_restart(handle, inode);
2237                         }
2238
2239                         ext4_free_blocks(handle, inode, nr, 1);
2240
2241                         if (parent_bh) {
2242                                 /*
2243                                  * The block which we have just freed is
2244                                  * pointed to by an indirect block: journal it
2245                                  */
2246                                 BUFFER_TRACE(parent_bh, "get_write_access");
2247                                 if (!ext4_journal_get_write_access(handle,
2248                                                                    parent_bh)){
2249                                         *p = 0;
2250                                         BUFFER_TRACE(parent_bh,
2251                                         "call ext4_journal_dirty_metadata");
2252                                         ext4_journal_dirty_metadata(handle,
2253                                                                     parent_bh);
2254                                 }
2255                         }
2256                 }
2257         } else {
2258                 /* We have reached the bottom of the tree. */
2259                 BUFFER_TRACE(parent_bh, "free data blocks");
2260                 ext4_free_data(handle, inode, parent_bh, first, last);
2261         }
2262 }
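/*
 * Example: freeing a whole double-indirect subtree is
 * ext4_free_branches(handle, inode, NULL, &nr, &nr + 1, 2).  The
 * depth-2 invocation walks the DIND block and recurses at depth 1 for
 * each IND block; the depth-0 recursion takes the else branch and
 * frees the actual data blocks through ext4_free_data().
 */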
2263
2264 /*
2265  * ext4_truncate()
2266  *
2267  * We block out ext4_get_block() block instantiations across the entire
2268  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
2269  * simultaneously on behalf of the same inode.
2270  *
2271  * As we work through the truncate and commit bits of it to the journal there
2272  * is one core guiding principle: the file's tree must always be consistent on
2273  * disk.  We must be able to restart the truncate after a crash.
2274  *
2275  * The file's tree may be transiently inconsistent in memory (although it
2276  * probably isn't), but whenever we close off and commit a journal transaction,
2277  * the contents of (the filesystem + the journal) must be consistent and
2278  * restartable.  It's pretty simple, really: bottom up, right to left (although
2279  * left-to-right works OK too).
2280  *
2281  * Note that at recovery time, journal replay occurs *before* the restart of
2282  * truncate against the orphan inode list.
2283  *
2284  * The committed inode has the new, desired i_size (which is the same as
2285  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
2286  * that this inode's truncate did not complete and it will again call
2287  * ext4_truncate() to have another go.  So there will be instantiated blocks
2288  * to the right of the truncation point in a crashed ext4 filesystem.  But
2289  * that's fine - as long as they are linked from the inode, the post-crash
2290  * ext4_truncate() run will find them and release them.
2291  */
2292 void ext4_truncate(struct inode *inode)
2293 {
2294         handle_t *handle;
2295         struct ext4_inode_info *ei = EXT4_I(inode);
2296         __le32 *i_data = ei->i_data;
2297         int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2298         struct address_space *mapping = inode->i_mapping;
2299         ext4_lblk_t offsets[4];
2300         Indirect chain[4];
2301         Indirect *partial;
2302         __le32 nr = 0;
2303         int n;
2304         ext4_lblk_t last_block;
2305         unsigned blocksize = inode->i_sb->s_blocksize;
2306         struct page *page;
2307
2308         if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2309             S_ISLNK(inode->i_mode)))
2310                 return;
2311         if (ext4_inode_is_fast_symlink(inode))
2312                 return;
2313         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2314                 return;
2315
2316         /*
2317          * We have to lock the EOF page here, because lock_page() nests
2318          * outside jbd2_journal_start().
2319          */
2320         if ((inode->i_size & (blocksize - 1)) == 0) {
2321                 /* Block boundary? Nothing to do */
2322                 page = NULL;
2323         } else {
2324                 page = grab_cache_page(mapping,
2325                                 inode->i_size >> PAGE_CACHE_SHIFT);
2326                 if (!page)
2327                         return;
2328         }
2329
2330         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
2331                 ext4_ext_truncate(inode, page);
2332                 return;
2333         }
2334
2335         handle = start_transaction(inode);
2336         if (IS_ERR(handle)) {
2337                 if (page) {
2338                         clear_highpage(page);
2339                         flush_dcache_page(page);
2340                         unlock_page(page);
2341                         page_cache_release(page);
2342                 }
2343                 return;         /* AKPM: return what? */
2344         }
2345
2346         last_block = (inode->i_size + blocksize-1)
2347                                         >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
2348
2349         if (page)
2350                 ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2351
2352         n = ext4_block_to_path(inode, last_block, offsets, NULL);
2353         if (n == 0)
2354                 goto out_stop;  /* error */
2355
2356         /*
2357          * OK.  This truncate is going to happen.  We add the inode to the
2358          * orphan list, so that if this truncate spans multiple transactions,
2359          * and we crash, we will resume the truncate when the filesystem
2360          * recovers.  It also marks the inode dirty, to catch the new size.
2361          *
2362          * Implication: the file must always be in a sane, consistent
2363          * truncatable state while each transaction commits.
2364          */
2365         if (ext4_orphan_add(handle, inode))
2366                 goto out_stop;
2367
2368         /*
2369          * The orphan list entry will now protect us from any crash which
2370          * occurs before the truncate completes, so it is now safe to propagate
2371          * the new, shorter inode size (held for now in i_size) into the
2372          * on-disk inode. We do this via i_disksize, which is the value which
2373          * ext4 *really* writes onto the disk inode.
2374          */
2375         ei->i_disksize = inode->i_size;
2376
2377         /*
2378          * From here we block out all ext4_get_block() callers who want to
2379          * modify the block allocation tree.
2380          */
2381         mutex_lock(&ei->truncate_mutex);
2382
2383         if (n == 1) {           /* direct blocks */
2384                 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
2385                                i_data + EXT4_NDIR_BLOCKS);
2386                 goto do_indirects;
2387         }
2388
2389         partial = ext4_find_shared(inode, n, offsets, chain, &nr);
2390         /* Kill the top of shared branch (not detached) */
2391         if (nr) {
2392                 if (partial == chain) {
2393                         /* Shared branch grows from the inode */
2394                         ext4_free_branches(handle, inode, NULL,
2395                                            &nr, &nr+1, (chain+n-1) - partial);
2396                         *partial->p = 0;
2397                         /*
2398                          * We mark the inode dirty prior to restart,
2399                          * and prior to stop.  No need for it here.
2400                          */
2401                 } else {
2402                         /* Shared branch grows from an indirect block */
2403                         BUFFER_TRACE(partial->bh, "get_write_access");
2404                         ext4_free_branches(handle, inode, partial->bh,
2405                                         partial->p,
2406                                         partial->p+1, (chain+n-1) - partial);
2407                 }
2408         }
2409         /* Clear the ends of indirect blocks on the shared branch */
2410         while (partial > chain) {
2411                 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
2412                                    (__le32*)partial->bh->b_data+addr_per_block,
2413                                    (chain+n-1) - partial);
2414                 BUFFER_TRACE(partial->bh, "call brelse");
2415                 brelse(partial->bh);
2416                 partial--;
2417         }
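        /*
         * The switch below deliberately falls through: starting at the
         * level that contains the truncation point, every whole subtree
         * to its right (IND, then DIND, then TIND) is released in turn.
         */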
2418 do_indirects:
2419         /* Kill the remaining (whole) subtrees */
2420         switch (offsets[0]) {
2421         default:
2422                 nr = i_data[EXT4_IND_BLOCK];
2423                 if (nr) {
2424                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2425                         i_data[EXT4_IND_BLOCK] = 0;
2426                 }
2427         case EXT4_IND_BLOCK:
2428                 nr = i_data[EXT4_DIND_BLOCK];
2429                 if (nr) {
2430                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2431                         i_data[EXT4_DIND_BLOCK] = 0;
2432                 }
2433         case EXT4_DIND_BLOCK:
2434                 nr = i_data[EXT4_TIND_BLOCK];
2435                 if (nr) {
2436                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2437                         i_data[EXT4_TIND_BLOCK] = 0;
2438                 }
2439         case EXT4_TIND_BLOCK:
2440                 ;
2441         }
2442
2443         ext4_discard_reservation(inode);
2444
2445         mutex_unlock(&ei->truncate_mutex);
2446         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2447         ext4_mark_inode_dirty(handle, inode);
2448
2449         /*
2450          * In a multi-transaction truncate, we only make the final transaction
2451          * synchronous
2452          */
2453         if (IS_SYNC(inode))
2454                 handle->h_sync = 1;
2455 out_stop:
2456         /*
2457          * If this was a simple ftruncate(), and the file will remain alive
2458          * then we need to clear up the orphan record which we created above.
2459          * However, if this was a real unlink then we were called by
2460          * ext4_delete_inode(), and we allow that function to clean up the
2461          * orphan info for us.
2462          */
2463         if (inode->i_nlink)
2464                 ext4_orphan_del(handle, inode);
2465
2466         ext4_journal_stop(handle);
2467 }
2468
2469 static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
2470                 unsigned long ino, struct ext4_iloc *iloc)
2471 {
2472         unsigned long desc, group_desc;
2473         ext4_group_t block_group;
2474         unsigned long offset;
2475         ext4_fsblk_t block;
2476         struct buffer_head *bh;
2477         struct ext4_group_desc * gdp;
2478
2479         if (!ext4_valid_inum(sb, ino)) {
2480                 /*
2481                  * This error is already checked for in namei.c unless we are
2482                  * looking at an NFS filehandle, in which case no error
2483                  * report is needed
2484                  */
2485                 return 0;
2486         }
2487
2488         block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
2489         if (block_group >= EXT4_SB(sb)->s_groups_count) {
2490                 ext4_error(sb, "ext4_get_inode_block", "group >= groups count");
2491                 return 0;
2492         }
2493         smp_rmb();
2494         group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
2495         desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2496         bh = EXT4_SB(sb)->s_group_desc[group_desc];
2497         if (!bh) {
2498                 ext4_error(sb, "ext4_get_inode_block",
2499                             "Descriptor not loaded");
2500                 return 0;
2501         }
2502
2503         gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
2504                 desc * EXT4_DESC_SIZE(sb));
2505         /*
2506          * Figure out the offset within the block group inode table
2507          */
2508         offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
2509                 EXT4_INODE_SIZE(sb);
2510         block = ext4_inode_table(sb, gdp) +
2511                 (offset >> EXT4_BLOCK_SIZE_BITS(sb));
2512
2513         iloc->block_group = block_group;
2514         iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
2515         return block;
2516 }
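/*
 * A worked example, assuming 8192 inodes per group, 256-byte inodes and
 * 4K blocks: for ino 20000, block_group == (20000 - 1) / 8192 == 2 and
 * offset == ((20000 - 1) % 8192) * 256 == 925440, so the inode sits
 * 925440 >> 12 == 225 blocks into that group's inode table, at byte
 * 925440 & 4095 == 3840 within the block.
 */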
2517
2518 /*
2519  * ext4_get_inode_loc returns with an extra refcount against the inode's
2520  * underlying buffer_head on success. If 'in_mem' is true, we have all
2521  * data in memory that is needed to recreate the on-disk version of this
2522  * inode.
2523  */
2524 static int __ext4_get_inode_loc(struct inode *inode,
2525                                 struct ext4_iloc *iloc, int in_mem)
2526 {
2527         ext4_fsblk_t block;
2528         struct buffer_head *bh;
2529
2530         block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2531         if (!block)
2532                 return -EIO;
2533
2534         bh = sb_getblk(inode->i_sb, block);
2535         if (!bh) {
2536                 ext4_error(inode->i_sb, "ext4_get_inode_loc",
2537                                 "unable to read inode block - "
2538                                 "inode=%lu, block=%llu",
2539                                  inode->i_ino, block);
2540                 return -EIO;
2541         }
2542         if (!buffer_uptodate(bh)) {
2543                 lock_buffer(bh);
2544                 if (buffer_uptodate(bh)) {
2545                         /* someone brought it uptodate while we waited */
2546                         unlock_buffer(bh);
2547                         goto has_buffer;
2548                 }
2549
2550                 /*
2551                  * If we have all information of the inode in memory and this
2552                  * is the only valid inode in the block, we need not read the
2553                  * block.
2554                  */
2555                 if (in_mem) {
2556                         struct buffer_head *bitmap_bh;
2557                         struct ext4_group_desc *desc;
2558                         int inodes_per_buffer;
2559                         int inode_offset, i;
2560                         ext4_group_t block_group;
2561                         int start;
2562
2563                         block_group = (inode->i_ino - 1) /
2564                                         EXT4_INODES_PER_GROUP(inode->i_sb);
2565                         inodes_per_buffer = bh->b_size /
2566                                 EXT4_INODE_SIZE(inode->i_sb);
2567                         inode_offset = ((inode->i_ino - 1) %
2568                                         EXT4_INODES_PER_GROUP(inode->i_sb));
2569                         start = inode_offset & ~(inodes_per_buffer - 1);
2570
2571                         /* Is the inode bitmap in cache? */
2572                         desc = ext4_get_group_desc(inode->i_sb,
2573                                                 block_group, NULL);
2574                         if (!desc)
2575                                 goto make_io;
2576
2577                         bitmap_bh = sb_getblk(inode->i_sb,
2578                                 ext4_inode_bitmap(inode->i_sb, desc));
2579                         if (!bitmap_bh)
2580                                 goto make_io;
2581
2582                         /*
2583                          * If the inode bitmap isn't in cache then the
2584                          * optimisation may end up performing two reads instead
2585                          * of one, so skip it.
2586                          */
2587                         if (!buffer_uptodate(bitmap_bh)) {
2588                                 brelse(bitmap_bh);
2589                                 goto make_io;
2590                         }
2591                         for (i = start; i < start + inodes_per_buffer; i++) {
2592                                 if (i == inode_offset)
2593                                         continue;
2594                                 if (ext4_test_bit(i, bitmap_bh->b_data))
2595                                         break;
2596                         }
2597                         brelse(bitmap_bh);
2598                         if (i == start + inodes_per_buffer) {
2599                                 /* all other inodes are free, so skip I/O */
2600                                 memset(bh->b_data, 0, bh->b_size);
2601                                 set_buffer_uptodate(bh);
2602                                 unlock_buffer(bh);
2603                                 goto has_buffer;
2604                         }
2605                 }
2606
2607 make_io:
2608                 /*
2609                  * There are other valid inodes in the buffer, this inode
2610                  * has in-inode xattrs, or we don't have this inode in memory.
2611                  * Read the block from disk.
2612                  */
2613                 get_bh(bh);
2614                 bh->b_end_io = end_buffer_read_sync;
2615                 submit_bh(READ_META, bh);
2616                 wait_on_buffer(bh);
2617                 if (!buffer_uptodate(bh)) {
2618                         ext4_error(inode->i_sb, "ext4_get_inode_loc",
2619                                         "unable to read inode block - "
2620                                         "inode=%lu, block=%llu",
2621                                         inode->i_ino, block);
2622                         brelse(bh);
2623                         return -EIO;
2624                 }
2625         }
2626 has_buffer:
2627         iloc->bh = bh;
2628         return 0;
2629 }
2630
2631 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
2632 {
2633         /* We have all inode data except xattrs in memory here. */
2634         return __ext4_get_inode_loc(inode, iloc,
2635                 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
2636 }
2637
2638 void ext4_set_inode_flags(struct inode *inode)
2639 {
2640         unsigned int flags = EXT4_I(inode)->i_flags;
2641
2642         inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2643         if (flags & EXT4_SYNC_FL)
2644                 inode->i_flags |= S_SYNC;
2645         if (flags & EXT4_APPEND_FL)
2646                 inode->i_flags |= S_APPEND;
2647         if (flags & EXT4_IMMUTABLE_FL)
2648                 inode->i_flags |= S_IMMUTABLE;
2649         if (flags & EXT4_NOATIME_FL)
2650                 inode->i_flags |= S_NOATIME;
2651         if (flags & EXT4_DIRSYNC_FL)
2652                 inode->i_flags |= S_DIRSYNC;
2653 }
2654
2655 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
2656 void ext4_get_inode_flags(struct ext4_inode_info *ei)
2657 {
2658         unsigned int flags = ei->vfs_inode.i_flags;
2659
2660         ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
2661                         EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
2662         if (flags & S_SYNC)
2663                 ei->i_flags |= EXT4_SYNC_FL;
2664         if (flags & S_APPEND)
2665                 ei->i_flags |= EXT4_APPEND_FL;
2666         if (flags & S_IMMUTABLE)
2667                 ei->i_flags |= EXT4_IMMUTABLE_FL;
2668         if (flags & S_NOATIME)
2669                 ei->i_flags |= EXT4_NOATIME_FL;
2670         if (flags & S_DIRSYNC)
2671                 ei->i_flags |= EXT4_DIRSYNC_FL;
2672 }
2673 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
2674                                         struct ext4_inode_info *ei)
2675 {
2676         blkcnt_t i_blocks;
2677         struct inode *inode = &(ei->vfs_inode);
2678         struct super_block *sb = inode->i_sb;
2679
2680         if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
2681                                 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
2682                 /* we are using the combined 48 bit field */
2683                 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
2684                                         le32_to_cpu(raw_inode->i_blocks_lo);
2685                 if (ei->i_flags & EXT4_HUGE_FILE_FL) {
2686                         /* i_blocks is in units of the file system block size */
2687                         return i_blocks << (inode->i_blkbits - 9);
2688                 } else {
2689                         return i_blocks;
2690                 }
2691         } else {
2692                 return le32_to_cpu(raw_inode->i_blocks_lo);
2693         }
2694 }
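/*
 * A minimal userspace sketch of the decode above (illustrative only;
 * decode_i_blocks() is a hypothetical helper, not a kernel function).
 * The on-disk count is split across a 16-bit high and a 32-bit low field;
 * when EXT4_HUGE_FILE_FL is set the count is kept in filesystem blocks and
 * must be rescaled to the 512-byte units that the VFS i_blocks field uses.
 */
#include <stdint.h>

static uint64_t decode_i_blocks(uint16_t blocks_hi, uint32_t blocks_lo,
                                int huge_file, unsigned int blkbits)
{
        uint64_t blocks = ((uint64_t)blocks_hi << 32) | blocks_lo;

        if (huge_file)
                blocks <<= (blkbits - 9);  /* fs blocks -> 512-byte sectors */
        return blocks;
}
/* e.g. decode_i_blocks(0x1, 0, 0, 12) == 1ULL << 32 sectors */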
2695
2696 void ext4_read_inode(struct inode *inode)
2697 {
2698         struct ext4_iloc iloc;
2699         struct ext4_inode *raw_inode;
2700         struct ext4_inode_info *ei = EXT4_I(inode);
2701         struct buffer_head *bh;
2702         int block;
2703
2704 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
2705         ei->i_acl = EXT4_ACL_NOT_CACHED;
2706         ei->i_default_acl = EXT4_ACL_NOT_CACHED;
2707 #endif
2708         ei->i_block_alloc_info = NULL;
2709
2710         if (__ext4_get_inode_loc(inode, &iloc, 0))
2711                 goto bad_inode;
2712         bh = iloc.bh;
2713         raw_inode = ext4_raw_inode(&iloc);
2714         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2715         inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2716         inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2717         if (!(test_opt(inode->i_sb, NO_UID32))) {
2718                 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2719                 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2720         }
2721         inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2722
2723         ei->i_state = 0;
2724         ei->i_dir_start_lookup = 0;
2725         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2726         /* We now have enough fields to check if the inode was active or not.
2727          * This is needed because nfsd might try to access dead inodes;
2728          * the test is the same one that e2fsck uses.
2729          * NeilBrown 1999oct15
2730          */
2731         if (inode->i_nlink == 0) {
2732                 if (inode->i_mode == 0 ||
2733                     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
2734                         /* this inode is deleted */
2735                         brelse(bh);
2736                         goto bad_inode;
2737                 }
2738                 /* The only unlinked inodes we let through here have
2739                  * valid i_mode and are being read by the orphan
2740                  * recovery code: that's fine, we're about to complete
2741                  * the process of deleting those. */
2742         }
2743         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2744         inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
2745         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
2746         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2747             cpu_to_le32(EXT4_OS_HURD)) {
2748                 ei->i_file_acl |=
2749                         ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2750         }
2751         inode->i_size = ext4_isize(raw_inode);
2752         ei->i_disksize = inode->i_size;
2753         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2754         ei->i_block_group = iloc.block_group;
2755         /*
2756          * NOTE! The in-memory inode i_data array is in little-endian order
2757          * even on big-endian machines: we do NOT byteswap the block numbers!
2758          */
2759         for (block = 0; block < EXT4_N_BLOCKS; block++)
2760                 ei->i_data[block] = raw_inode->i_block[block];
2761         INIT_LIST_HEAD(&ei->i_orphan);
2762
2763         if (inode->i_ino >= EXT4_FIRST_INO(inode->i_sb) + 1 &&
2764             EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2765                 /*
2766                  * When mke2fs creates big inodes it does not zero out
2767                  * the unused bytes above EXT4_GOOD_OLD_INODE_SIZE,
2768                  * so ignore those first few inodes.
2769                  */
2770                 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2771                 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2772                     EXT4_INODE_SIZE(inode->i_sb)) {
2773                         brelse(bh);
2774                         goto bad_inode;
2775                 }
2776                 if (ei->i_extra_isize == 0) {
2777                         /* The extra space is currently unused. Use it. */
2778                         ei->i_extra_isize = sizeof(struct ext4_inode) -
2779                                             EXT4_GOOD_OLD_INODE_SIZE;
2780                 } else {
2781                         __le32 *magic = (void *)raw_inode +
2782                                         EXT4_GOOD_OLD_INODE_SIZE +
2783                                         ei->i_extra_isize;
2784                         if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2785                                  ei->i_state |= EXT4_STATE_XATTR;
2786                 }
2787         } else
2788                 ei->i_extra_isize = 0;
2789
2790         EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
2791         EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
2792         EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
2793         EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
2794
2795         if (S_ISREG(inode->i_mode)) {
2796                 inode->i_op = &ext4_file_inode_operations;
2797                 inode->i_fop = &ext4_file_operations;
2798                 ext4_set_aops(inode);
2799         } else if (S_ISDIR(inode->i_mode)) {
2800                 inode->i_op = &ext4_dir_inode_operations;
2801                 inode->i_fop = &ext4_dir_operations;
2802         } else if (S_ISLNK(inode->i_mode)) {
2803                 if (ext4_inode_is_fast_symlink(inode))
2804                         inode->i_op = &ext4_fast_symlink_inode_operations;
2805                 else {
2806                         inode->i_op = &ext4_symlink_inode_operations;
2807                         ext4_set_aops(inode);
2808                 }
2809         } else {
2810                 inode->i_op = &ext4_special_inode_operations;
2811                 if (raw_inode->i_block[0])
2812                         init_special_inode(inode, inode->i_mode,
2813                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2814                 else
2815                         init_special_inode(inode, inode->i_mode,
2816                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2817         }
2818         brelse(iloc.bh);
2819         ext4_set_inode_flags(inode);
2820         return;
2821
2822 bad_inode:
2823         make_bad_inode(inode);
2824         return;
2825 }
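/*
 * A sketch of the i_block[] device-number convention used in the
 * S_ISCHR/S_ISBLK branch above (illustrative; the sketch_* helpers are
 * hypothetical stand-ins for the kernel's old_decode_dev() and
 * new_decode_dev()). A non-zero i_block[0] carries the legacy 16-bit
 * encoding; otherwise i_block[1] carries the larger "new" encoding.
 */
#include <stdint.h>

static void sketch_old_decode(uint16_t v, unsigned int *major,
                              unsigned int *minor)
{
        *major = (v >> 8) & 0xff;
        *minor = v & 0xff;
}

static void sketch_new_decode(uint32_t v, unsigned int *major,
                              unsigned int *minor)
{
        *major = (v & 0xfff00) >> 8;
        *minor = (v & 0xff) | ((v >> 12) & 0xfff00);
}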
2826
2827 static int ext4_inode_blocks_set(handle_t *handle,
2828                                 struct ext4_inode *raw_inode,
2829                                 struct ext4_inode_info *ei)
2830 {
2831         struct inode *inode = &(ei->vfs_inode);
2832         u64 i_blocks = inode->i_blocks;
2833         struct super_block *sb = inode->i_sb;
2834         int err = 0;
2835
2836         if (i_blocks <= ~0U) {
2837                 /*
2838                  * i_blocks can be represented in a 32 bit variable
2839                  * as a multiple of 512 bytes
2840                  */
2841                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2842                 raw_inode->i_blocks_high = 0;
2843                 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2844         } else if (i_blocks <= 0xffffffffffffULL) {
2845                 /*
2846                  * i_blocks can be represented in a 48 bit variable
2847                  * as a multiple of 512 bytes
2848                  */
2849                 err = ext4_update_rocompat_feature(handle, sb,
2850                                             EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2851                 if (err)
2852                         goto err_out;
2853                 /* i_blocks is stored in the split 48 bit fields */
2854                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2855                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2856                 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2857         } else {
2858                 /*
2859                  * i_blocks should be represented in a 48 bit variable
2860                  * as a multiple of the file system block size
2861                  */
2862                 err = ext4_update_rocompat_feature(handle, sb,
2863                                             EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2864                 if (err)
2865                         goto err_out;
2866                 ei->i_flags |= EXT4_HUGE_FILE_FL;
2867                 /* i_blocks is stored in units of the file system block size */
2868                 i_blocks = i_blocks >> (inode->i_blkbits - 9);
2869                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2870                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2871         }
2872 err_out:
2873         return err;
2874 }
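/*
 * The encode side as a userspace sketch (illustrative only;
 * encode_i_blocks() is hypothetical). It mirrors the three branches above:
 * counts up to 2^32 - 1 sectors need only i_blocks_lo, counts up to
 * 2^48 - 1 use the split hi/lo fields, and anything larger is stored in
 * filesystem-block units with EXT4_HUGE_FILE_FL set.
 */
#include <stdint.h>

static void encode_i_blocks(uint64_t sectors, unsigned int blkbits,
                            uint32_t *lo, uint16_t *hi, int *huge_file)
{
        *huge_file = sectors > 0xffffffffffffULL;
        if (*huge_file)
                sectors >>= (blkbits - 9);  /* 512-byte sectors -> fs blocks */
        *lo = (uint32_t)sectors;
        *hi = (uint16_t)(sectors >> 32);
}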
2875
2876 /*
2877  * Post the struct inode info into an on-disk inode location in the
2878  * buffer-cache.  This gobbles the caller's reference to the
2879  * buffer_head in the inode location struct.
2880  *
2881  * The caller must have write access to iloc->bh.
2882  */
2883 static int ext4_do_update_inode(handle_t *handle,
2884                                 struct inode *inode,
2885                                 struct ext4_iloc *iloc)
2886 {
2887         struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
2888         struct ext4_inode_info *ei = EXT4_I(inode);
2889         struct buffer_head *bh = iloc->bh;
2890         int err = 0, rc, block;
2891
2892         /* For fields not tracked in the in-memory inode,
2893          * initialise them to zero for new inodes. */
2894         if (ei->i_state & EXT4_STATE_NEW)
2895                 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2896
2897         ext4_get_inode_flags(ei);
2898         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2899         if (!(test_opt(inode->i_sb, NO_UID32))) {
2900                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2901                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2902 /*
2903  * Fix up interoperability with old kernels. Otherwise, old inodes get
2904  * re-used with the upper 16 bits of the uid/gid intact
2905  */
2906                 if (!ei->i_dtime) {
2907                         raw_inode->i_uid_high =
2908                                 cpu_to_le16(high_16_bits(inode->i_uid));
2909                         raw_inode->i_gid_high =
2910                                 cpu_to_le16(high_16_bits(inode->i_gid));
2911                 } else {
2912                         raw_inode->i_uid_high = 0;
2913                         raw_inode->i_gid_high = 0;
2914                 }
2915         } else {
2916                 raw_inode->i_uid_low =
2917                         cpu_to_le16(fs_high2lowuid(inode->i_uid));
2918                 raw_inode->i_gid_low =
2919                         cpu_to_le16(fs_high2lowgid(inode->i_gid));
2920                 raw_inode->i_uid_high = 0;
2921                 raw_inode->i_gid_high = 0;
2922         }
2923         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2924
2925         EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
2926         EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
2927         EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
2928         EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
2929
2930         if ((err = ext4_inode_blocks_set(handle, raw_inode, ei)))
2931                 goto out_brelse;
2932         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2933         raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2934         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2935             cpu_to_le32(EXT4_OS_HURD))
2936                 raw_inode->i_file_acl_high =
2937                         cpu_to_le16(ei->i_file_acl >> 32);
2938         raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
2939         ext4_isize_set(raw_inode, ei->i_disksize);
2940         if (ei->i_disksize > 0x7fffffffULL) {
2941                 struct super_block *sb = inode->i_sb;
2942                 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
2943                                 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
2944                                 EXT4_SB(sb)->s_es->s_rev_level ==
2945                                 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
2946                         /* If this is the first large file
2947                          * created, add a flag to the superblock.
2948                          */
2949                         err = ext4_journal_get_write_access(handle,
2950                                         EXT4_SB(sb)->s_sbh);
2951                         if (err)
2952                                 goto out_brelse;
2953                         ext4_update_dynamic_rev(sb);
2954                         EXT4_SET_RO_COMPAT_FEATURE(sb,
2955                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
2956                         sb->s_dirt = 1;
2957                         handle->h_sync = 1;
2958                         err = ext4_journal_dirty_metadata(handle,
2959                                         EXT4_SB(sb)->s_sbh);
2960                 }
2961         }
2962         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2963         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2964                 if (old_valid_dev(inode->i_rdev)) {
2965                         raw_inode->i_block[0] =
2966                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
2967                         raw_inode->i_block[1] = 0;
2968                 } else {
2969                         raw_inode->i_block[0] = 0;
2970                         raw_inode->i_block[1] =
2971                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
2972                         raw_inode->i_block[2] = 0;
2973                 }
2974         } else for (block = 0; block < EXT4_N_BLOCKS; block++)
2975                 raw_inode->i_block[block] = ei->i_data[block];
2976
2977         if (ei->i_extra_isize)
2978                 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
2979
2980         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2981         rc = ext4_journal_dirty_metadata(handle, bh);
2982         if (!err)
2983                 err = rc;
2984         ei->i_state &= ~EXT4_STATE_NEW;
2985
2986 out_brelse:
2987         brelse(bh);
2988         ext4_std_error(inode->i_sb, err);
2989         return err;
2990 }
2991
2992 /*
2993  * ext4_write_inode()
2994  *
2995  * We are called from a few places:
2996  *
2997  * - Within generic_file_write() for O_SYNC files.
2998  *   Here, there will be no transaction running. We wait for any running
2999  *   transaction to commit.
3000  *
3001  * - Within sys_sync(), kupdate and such.
3002  *   We wait on commit, if told to.
3003  *
3004  * - Within prune_icache() (PF_MEMALLOC == true)
3005  *   Here we simply return.  We can't afford to block kswapd on the
3006  *   journal commit.
3007  *
3008  * In all cases it is actually safe for us to return without doing anything,
3009  * because the inode has been copied into a raw inode buffer in
3010  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
3011  * knfsd.
3012  *
3013  * Note that we are absolutely dependent upon all inode dirtiers doing the
3014  * right thing: they *must* call mark_inode_dirty() after dirtying info in
3015  * which we are interested.
3016  *
3017  * It would be a bug for them to not do this.  The code:
3018  *
3019  *      mark_inode_dirty(inode)
3020  *      stuff();
3021  *      inode->i_size = expr;
3022  *
3023  * is in error because a kswapd-driven write_inode() could occur while
3024  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
3025  * will no longer be on the superblock's dirty inode list.
3026  */
3027 int ext4_write_inode(struct inode *inode, int wait)
3028 {
3029         if (current->flags & PF_MEMALLOC)
3030                 return 0;
3031
3032         if (ext4_journal_current_handle()) {
3033                 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3034                 dump_stack();
3035                 return -EIO;
3036         }
3037
3038         if (!wait)
3039                 return 0;
3040
3041         return ext4_force_commit(inode->i_sb);
3042 }
3043
3044 /*
3045  * ext4_setattr()
3046  *
3047  * Called from notify_change.
3048  *
3049  * We want to trap VFS attempts to truncate the file as soon as
3050  * possible.  In particular, we want to make sure that when the VFS
3051  * shrinks i_size, we put the inode on the orphan list and modify
3052  * i_disksize immediately, so that during the subsequent flushing of
3053  * dirty pages and freeing of disk blocks, we can guarantee that any
3054  * commit will leave the blocks being flushed in an unused state on
3055  * disk.  (On recovery, the inode will get truncated and the blocks will
3056  * be freed, so we have a strong guarantee that no future commit will
3057  * leave these blocks visible to the user.)
3058  *
3059  * Called with inode->sem down.
3060  */
3061 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3062 {
3063         struct inode *inode = dentry->d_inode;
3064         int error, rc = 0;
3065         const unsigned int ia_valid = attr->ia_valid;
3066
3067         error = inode_change_ok(inode, attr);
3068         if (error)
3069                 return error;
3070
3071         if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3072                 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3073                 handle_t *handle;
3074
3075                 /* (user+group)*(old+new) structure, inode write (sb,
3076                  * inode block, ? - but truncate inode update has it) */
3077                 handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
3078                                         EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3079                 if (IS_ERR(handle)) {
3080                         error = PTR_ERR(handle);
3081                         goto err_out;
3082                 }
3083                 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
3084                 if (error) {
3085                         ext4_journal_stop(handle);
3086                         return error;
3087                 }
3088                 /* Update corresponding info in inode so that everything is in
3089                  * one transaction */
3090                 if (attr->ia_valid & ATTR_UID)
3091                         inode->i_uid = attr->ia_uid;
3092                 if (attr->ia_valid & ATTR_GID)
3093                         inode->i_gid = attr->ia_gid;
3094                 error = ext4_mark_inode_dirty(handle, inode);
3095                 ext4_journal_stop(handle);
3096         }
3097
3098         if (attr->ia_valid & ATTR_SIZE) {
3099                 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
3100                         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3101
3102                         if (attr->ia_size > sbi->s_bitmap_maxbytes) {
3103                                 error = -EFBIG;
3104                                 goto err_out;
3105                         }
3106                 }
3107         }
3108
3109         if (S_ISREG(inode->i_mode) &&
3110             attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3111                 handle_t *handle;
3112
3113                 handle = ext4_journal_start(inode, 3);
3114                 if (IS_ERR(handle)) {
3115                         error = PTR_ERR(handle);
3116                         goto err_out;
3117                 }
3118
3119                 error = ext4_orphan_add(handle, inode);
3120                 EXT4_I(inode)->i_disksize = attr->ia_size;
3121                 rc = ext4_mark_inode_dirty(handle, inode);
3122                 if (!error)
3123                         error = rc;
3124                 ext4_journal_stop(handle);
3125         }
3126
3127         rc = inode_setattr(inode, attr);
3128
3129         /* If inode_setattr's call to ext4_truncate failed to get a
3130          * transaction handle at all, we need to clean up the in-core
3131          * orphan list manually. */
3132         if (inode->i_nlink)
3133                 ext4_orphan_del(NULL, inode);
3134
3135         if (!rc && (ia_valid & ATTR_MODE))
3136                 rc = ext4_acl_chmod(inode);
3137
3138 err_out:
3139         ext4_std_error(inode->i_sb, error);
3140         if (!error)
3141                 error = rc;
3142         return error;
3143 }
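/*
 * The shrinking-truncate ordering implemented above, restated as a sketch
 * (a summary of the flow above; no new logic):
 *
 *   1. start a handle and ext4_orphan_add() the inode, so a crash between
 *      here and the final commit leaves the inode on the on-disk orphan
 *      list for recovery;
 *   2. lower i_disksize and mark the inode dirty in the same transaction,
 *      so no commit can expose blocks that are about to be freed;
 *   3. inode_setattr() then performs the actual truncate;
 *   4. if the truncate path never obtained a handle, the
 *      ext4_orphan_del(NULL, ...) call above cleans up the in-core orphan
 *      list for inodes that are still linked.
 */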
3144
3145
3146 /*
3147  * How many blocks doth make a writepage()?
3148  *
3149  * With N blocks per page, it may be:
3150  * N data blocks
3151  * 2 indirect blocks
3152  * 2 dindirect blocks
3153  * 1 tindirect block
3154  * N+5 bitmap blocks (from the above)
3155  * N+5 group descriptor summary blocks
3156  * 1 inode block
3157  * 1 superblock.
3158  * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
3159  *
3160  * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
3161  *
3162  * With ordered or writeback data it's the same, less the N data blocks.
3163  *
3164  * If the inode's direct blocks can hold an integral number of pages then a
3165  * page cannot straddle two indirect blocks, and we can only touch one indirect
3166  * and dindirect block, and the "5" above becomes "3".
3167  *
3168  * This still overestimates under most circumstances.  If we were to pass the
3169  * start and end offsets in here as well we could do block_to_path() on each
3170  * block and work out the exact number of indirects which are touched.  Pah.
3171  */
3172
3173 int ext4_writepage_trans_blocks(struct inode *inode)
3174 {
3175         int bpp = ext4_journal_blocks_per_page(inode);
3176         int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
3177         int ret;
3178
3179         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3180                 return ext4_ext_writepage_trans_blocks(inode, bpp);
3181
3182         if (ext4_should_journal_data(inode))
3183                 ret = 3 * (bpp + indirects) + 2;
3184         else
3185                 ret = 2 * (bpp + indirects) + 2;
3186
3187 #ifdef CONFIG_QUOTA
3188         /* We know that the structure was already allocated during DQUOT_INIT,
3189          * so we will be updating only the data blocks + inodes */
3190         ret += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
3191 #endif
3192
3193         return ret;
3194 }
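/*
 * Worked example of the estimate above (illustrative numbers): with
 * 1 KiB blocks on a 4 KiB page, bpp = 4 and EXT4_NDIR_BLOCKS (12) is a
 * multiple of 4, so indirects = 3.  Then:
 *
 *   data=journal:        3 * (4 + 3) + 2 = 23 credits
 *   ordered/writeback:   2 * (4 + 3) + 2 = 16 credits
 *
 * plus 2 * EXT4_QUOTA_TRANS_BLOCKS(sb) when CONFIG_QUOTA is enabled.
 */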
3195
3196 /*
3197  * The caller must have previously called ext4_reserve_inode_write().
3198  * Given this, we know that the caller already has write access to iloc->bh.
3199  */
3200 int ext4_mark_iloc_dirty(handle_t *handle,
3201                 struct inode *inode, struct ext4_iloc *iloc)
3202 {
3203         int err = 0;
3204
3205         /* ext4_do_update_inode() consumes one bh->b_count */
3206         get_bh(iloc->bh);
3207
3208         /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
3209         err = ext4_do_update_inode(handle, inode, iloc);
3210         put_bh(iloc->bh);
3211         return err;
3212 }
3213
3214 /*
3215  * On success, we end up with an outstanding reference count against
3216  * iloc->bh.  This _must_ be cleaned up later.
3217  */
3218
3219 int
3220 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
3221                          struct ext4_iloc *iloc)
3222 {
3223         int err = 0;
3224         if (handle) {
3225                 err = ext4_get_inode_loc(inode, iloc);
3226                 if (!err) {
3227                         BUFFER_TRACE(iloc->bh, "get_write_access");
3228                         err = ext4_journal_get_write_access(handle, iloc->bh);
3229                         if (err) {
3230                                 brelse(iloc->bh);
3231                                 iloc->bh = NULL;
3232                         }
3233                 }
3234         }
3235         ext4_std_error(inode->i_sb, err);
3236         return err;
3237 }
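/*
 * Typical calling pattern for the two helpers above, as a sketch
 * (example_touch_inode() is hypothetical; the flag update is just a
 * placeholder for any in-core inode change):
 */
static int example_touch_inode(handle_t *handle, struct inode *inode)
{
        struct ext4_iloc iloc;
        int err;

        err = ext4_reserve_inode_write(handle, inode, &iloc);
        if (err)
                return err;
        EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL;
        /* consumes the bh reference taken by ext4_reserve_inode_write() */
        return ext4_mark_iloc_dirty(handle, inode, &iloc);
}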
3238
3239 /*
3240  * Expand an inode by new_extra_isize bytes.
3241  * Returns 0 on success or negative error number on failure.
3242  */
3243 static int ext4_expand_extra_isize(struct inode *inode,
3244                                    unsigned int new_extra_isize,
3245                                    struct ext4_iloc iloc,
3246                                    handle_t *handle)
3247 {
3248         struct ext4_inode *raw_inode;
3249         struct ext4_xattr_ibody_header *header;
3250         struct ext4_xattr_entry *entry;
3251
3252         if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
3253                 return 0;
3254
3255         raw_inode = ext4_raw_inode(&iloc);
3256
3257         header = IHDR(inode, raw_inode);
3258         entry = IFIRST(header);
3259
3260         /* No extended attributes present */
3261         if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
3262                 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
3263                 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
3264                         new_extra_isize);
3265                 EXT4_I(inode)->i_extra_isize = new_extra_isize;
3266                 return 0;
3267         }
3268
3269         /* try to expand with EAs present */
3270         return ext4_expand_extra_isize_ea(inode, new_extra_isize,
3271                                           raw_inode, handle);
3272 }
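/*
 * The in-inode layout assumed above (illustrative):
 *
 *   offset 0   .. 128                   fixed struct ext4_inode fields
 *                                       (EXT4_GOOD_OLD_INODE_SIZE)
 *   offset 128 .. 128 + i_extra_isize   extra fields (e.g. extended
 *                                       timestamps)
 *   then       .. EXT4_INODE_SIZE(sb)   in-inode xattr header
 *                                       (EXT4_XATTR_MAGIC) and entries
 *
 * Growing i_extra_isize therefore means shifting any in-inode xattrs
 * toward the end of the inode; ext4_expand_extra_isize_ea() handles that
 * case, while the memset() above covers the simpler no-xattr case.
 */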
3273
3274 /*
3275  * What we do here is to mark the in-core inode as clean with respect to inode
3276  * dirtiness (it may still be data-dirty).
3277  * This means that the in-core inode may be reaped by prune_icache
3278  * without having to perform any I/O.  This is a very good thing,
3279  * because *any* task may call prune_icache - even ones which
3280  * have a transaction open against a different journal.
3281  *
3282  * Is this cheating?  Not really.  Sure, we haven't written the
3283  * inode out, but prune_icache isn't a user-visible syncing function.
3284  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3285  * we start and wait on commits.
3286  *
3287  * Is this efficient/effective?  Well, we're being nice to the system
3288  * by cleaning up our inodes proactively so they can be reaped
3289  * without I/O.  But we are potentially leaving up to five seconds'
3290  * worth of inodes floating about which prune_icache wants us to
3291  * write out.  One way to fix that would be to get prune_icache()
3292  * to do a write_super() to free up some memory.  It has the desired
3293  * effect.
3294  */
3295 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
3296 {
3297         struct ext4_iloc iloc;
3298         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3299         static unsigned int mnt_count;
3300         int err, ret;
3301
3302         might_sleep();
3303         err = ext4_reserve_inode_write(handle, inode, &iloc);
3304         if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
3305             !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
3306                 /*
3307                  * We need extra buffer credits since we may write into EA block
3308                  * with this same handle. If journal_extend fails, then it will
3309                  * only result in a minor loss of functionality for that inode.
3310                  * If this is felt to be critical, then e2fsck should be run to
3311                  * force a large enough s_min_extra_isize.
3312                  */
3313                 if ((jbd2_journal_extend(handle,
3314                              EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
3315                         ret = ext4_expand_extra_isize(inode,
3316                                                       sbi->s_want_extra_isize,
3317                                                       iloc, handle);
3318                         if (ret) {
3319                                 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
3320                                 if (mnt_count !=
3321                                         le16_to_cpu(sbi->s_es->s_mnt_count)) {
3322                                         ext4_warning(inode->i_sb, __FUNCTION__,
3323                                         "Unable to expand inode %lu. Delete"
3324                                         " some EAs or run e2fsck.",
3325                                         inode->i_ino);
3326                                         mnt_count =
3327                                           le16_to_cpu(sbi->s_es->s_mnt_count);
3328                                 }
3329                         }
3330                 }
3331         }
3332         if (!err)
3333                 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
3334         return err;
3335 }
3336
3337 /*
3338  * ext4_dirty_inode() is called from __mark_inode_dirty()
3339  *
3340  * We're really interested in the case where a file is being extended.
3341  * i_size has been changed by generic_commit_write() and we thus need
3342  * to include the updated inode in the current transaction.
3343  *
3344  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3345  * are allocated to the file.
3346  *
3347  * If the inode is marked synchronous, we don't honour that here - doing
3348  * so would cause a commit on atime updates, which we don't bother doing.
3349  * We handle synchronous inodes at the highest possible level.
3350  */
3351 void ext4_dirty_inode(struct inode *inode)
3352 {
3353         handle_t *current_handle = ext4_journal_current_handle();
3354         handle_t *handle;
3355
3356         handle = ext4_journal_start(inode, 2);
3357         if (IS_ERR(handle))
3358                 goto out;
3359         if (current_handle &&
3360                 current_handle->h_transaction != handle->h_transaction) {
3361                 /* This task has a transaction open against a different fs */
3362                 printk(KERN_EMERG "%s: transactions do not match!\n",
3363                        __FUNCTION__);
3364         } else {
3365                 jbd_debug(5, "marking dirty.  outer handle=%p\n",
3366                                 current_handle);
3367                 ext4_mark_inode_dirty(handle, inode);
3368         }
3369         ext4_journal_stop(handle);
3370 out:
3371         return;
3372 }
3373
3374 #if 0
3375 /*
3376  * Bind an inode's backing buffer_head into this transaction, to prevent
3377  * it from being flushed to disk early.  Unlike
3378  * ext4_reserve_inode_write, this leaves behind no bh reference and
3379  * returns no iloc structure, so the caller needs to repeat the iloc
3380  * lookup to mark the inode dirty later.
3381  */
3382 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
3383 {
3384         struct ext4_iloc iloc;
3385
3386         int err = 0;
3387         if (handle) {
3388                 err = ext4_get_inode_loc(inode, &iloc);
3389                 if (!err) {
3390                         BUFFER_TRACE(iloc.bh, "get_write_access");
3391                         err = jbd2_journal_get_write_access(handle, iloc.bh);
3392                         if (!err)
3393                                 err = ext4_journal_dirty_metadata(handle,
3394                                                                   iloc.bh);
3395                         brelse(iloc.bh);
3396                 }
3397         }
3398         ext4_std_error(inode->i_sb, err);
3399         return err;
3400 }
3401 #endif
3402
3403 int ext4_change_inode_journal_flag(struct inode *inode, int val)
3404 {
3405         journal_t *journal;
3406         handle_t *handle;
3407         int err;
3408
3409         /*
3410          * We have to be very careful here: changing a data block's
3411          * journaling status dynamically is dangerous.  If we write a
3412          * data block to the journal, change the status and then delete
3413          * that block, we risk forgetting to revoke the old log record
3414          * from the journal and so a subsequent replay can corrupt data.
3415          * So, first we make sure that the journal is empty and that
3416          * nobody is changing anything.
3417          */
3418
3419         journal = EXT4_JOURNAL(inode);
3420         if (is_journal_aborted(journal))
3421                 return -EROFS;
3422
3423         jbd2_journal_lock_updates(journal);
3424         jbd2_journal_flush(journal);
3425
3426         /*
3427          * OK, there are no updates running now, and all cached data is
3428          * synced to disk.  We are now in a completely consistent state
3429          * which doesn't have anything in the journal, and we know that
3430          * no filesystem updates are running, so it is safe to modify
3431          * the inode's in-core data-journaling state flag now.
3432          */
3433
3434         if (val)
3435                 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
3436         else
3437                 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
3438         ext4_set_aops(inode);
3439
3440         jbd2_journal_unlock_updates(journal);
3441
3442         /* Finally we can mark the inode as dirty. */
3443
3444         handle = ext4_journal_start(inode, 1);
3445         if (IS_ERR(handle))
3446                 return PTR_ERR(handle);
3447
3448         err = ext4_mark_inode_dirty(handle, inode);
3449         handle->h_sync = 1;
3450         ext4_journal_stop(handle);
3451         ext4_std_error(inode->i_sb, err);
3452
3453         return err;
3454 }