1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Goal-directed block allocation by Stephen Tweedie
16  *      (sct@redhat.com), 1993, 1998
17  *  Big-endian to little-endian byte-swapping/bitmaps by
18  *        David S. Miller (davem@caip.rutgers.edu), 1995
19  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20  *      (jj@sunsite.ms.mff.cuni.cz)
21  *
22  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23  */
24
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/ext4_jbd2.h>
29 #include <linux/jbd2.h>
30 #include <linux/highuid.h>
31 #include <linux/pagemap.h>
32 #include <linux/quotaops.h>
33 #include <linux/string.h>
34 #include <linux/buffer_head.h>
35 #include <linux/writeback.h>
36 #include <linux/mpage.h>
37 #include <linux/uio.h>
38 #include <linux/bio.h>
39 #include "xattr.h"
40 #include "acl.h"
41
42 /*
43  * Test whether an inode is a fast symlink.
44  */
45 static int ext4_inode_is_fast_symlink(struct inode *inode)
46 {
47         int ea_blocks = EXT4_I(inode)->i_file_acl ?
48                 (inode->i_sb->s_blocksize >> 9) : 0;
49
50         return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
51 }
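
/*
 * Editor's note (not in the original file): i_blocks is counted in
 * 512-byte sectors, hence the ">> 9" above.  A fast symlink may still
 * own one xattr block, which is why ea_blocks is subtracted before
 * the test.
 */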
52
53 /*
54  * The ext4 forget function must perform a revoke if we are freeing data
55  * which has been journaled.  Metadata (eg. indirect blocks) must be
56  * revoked in all cases.
57  *
58  * "bh" may be NULL: a metadata block may have been freed from memory
59  * but there may still be a record of it in the journal, and that record
60  * still needs to be revoked.
61  */
62 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
63                         struct buffer_head *bh, ext4_fsblk_t blocknr)
64 {
65         int err;
66
67         might_sleep();
68
69         BUFFER_TRACE(bh, "enter");
70
71         jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
72                   "data mode %lx\n",
73                   bh, is_metadata, inode->i_mode,
74                   test_opt(inode->i_sb, DATA_FLAGS));
75
76         /* Never use the revoke function if we are doing full data
77          * journaling: there is no need to, and a V1 superblock won't
78          * support it.  Otherwise, only skip the revoke on un-journaled
79          * data blocks. */
80
81         if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
82             (!is_metadata && !ext4_should_journal_data(inode))) {
83                 if (bh) {
84                         BUFFER_TRACE(bh, "call jbd2_journal_forget");
85                         return ext4_journal_forget(handle, bh);
86                 }
87                 return 0;
88         }
89
90         /*
91          * data!=journal && (is_metadata || should_journal_data(inode))
92          */
93         BUFFER_TRACE(bh, "call ext4_journal_revoke");
94         err = ext4_journal_revoke(handle, blocknr, bh);
95         if (err)
96                 ext4_abort(inode->i_sb, __FUNCTION__,
97                            "error %d when attempting revoke", err);
98         BUFFER_TRACE(bh, "exit");
99         return err;
100 }
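
/*
 * Summary of the decision above (editorial note, derived from the code
 * rather than from the original comments):
 *
 *   - data=journal mount:                       forget only, never revoke
 *   - otherwise, an un-journaled data block:    forget only
 *   - otherwise (metadata, or journaled data):  revoke
 */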
101
102 /*
103  * Work out how many blocks we need to proceed with the next chunk of a
104  * truncate transaction.
105  */
106 static unsigned long blocks_for_truncate(struct inode *inode)
107 {
108         unsigned long needed;
109
110         needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
111
112         /* Give ourselves just enough room to cope with inodes in which
113          * i_blocks is corrupt: we've seen disk corruptions in the past
114          * which resulted in random data in an inode which looked enough
115          * like a regular file for ext4 to try to delete it.  Things
116          * will go a bit crazy if that happens, but at least we should
117          * try not to panic the whole kernel. */
118         if (needed < 2)
119                 needed = 2;
120
121         /* But we need to bound the transaction so we don't overflow the
122          * journal. */
123         if (needed > EXT4_MAX_TRANS_DATA)
124                 needed = EXT4_MAX_TRANS_DATA;
125
126         return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
127 }
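
/*
 * Worked example (editorial, illustrative only; assumes 4KB blocks and
 * EXT4_MAX_TRANS_DATA == 64, the value in contemporary headers): a 1MB
 * file occupies i_blocks == 2048 512-byte sectors, so
 *
 *	needed = 2048 >> (12 - 9) = 256
 *
 * which is clamped to 64, and the function returns
 * EXT4_DATA_TRANS_BLOCKS(sb) + 64 credits for the next chunk.
 */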
128
129 /*
130  * Truncate transactions can be complex and absolutely huge.  So we need to
131  * be able to restart the transaction at a convenient checkpoint to make
132  * sure we don't overflow the journal.
133  *
134  * start_transaction gets us a new handle for a truncate transaction,
135  * and extend_transaction tries to extend the existing one a bit.  If
136  * extend fails, we need to propagate the failure up and restart the
137  * transaction in the top-level truncate loop. --sct
138  */
139 static handle_t *start_transaction(struct inode *inode)
140 {
141         handle_t *result;
142
143         result = ext4_journal_start(inode, blocks_for_truncate(inode));
144         if (!IS_ERR(result))
145                 return result;
146
147         ext4_std_error(inode->i_sb, PTR_ERR(result));
148         return result;
149 }
150
151 /*
152  * Try to extend this transaction for the purposes of truncation.
153  *
154  * Returns 0 if we managed to create more room.  If we can't create more
155  * room, the transaction must be restarted, and we return 1.
156  */
157 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
158 {
159         if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
160                 return 0;
161         if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
162                 return 0;
163         return 1;
164 }
165
166 /*
167  * Restart the transaction associated with *handle.  This does a commit,
168  * so before we call here everything must be consistently dirtied against
169  * this transaction.
170  */
171 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
172 {
173         jbd_debug(2, "restarting handle %p\n", handle);
174         return ext4_journal_restart(handle, blocks_for_truncate(inode));
175 }
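
/*
 * Illustrative sketch (editorial; the function below is hypothetical
 * and not part of the original file): the pattern the truncate path
 * is expected to follow with the three helpers above.
 */
#if 0
static void example_truncate_chunk(handle_t *handle, struct inode *inode)
{
	if (try_to_extend_transaction(handle, inode)) {
		/* Everything must be consistently dirty before we restart */
		ext4_mark_inode_dirty(handle, inode);
		ext4_journal_test_restart(handle, inode);
	}
	/* ... free the next chunk of blocks with the fresh credits ... */
}
#endif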
176
177 /*
178  * Called at the last iput() if i_nlink is zero.
179  */
180 void ext4_delete_inode(struct inode *inode)
181 {
182         handle_t *handle;
183
184         truncate_inode_pages(&inode->i_data, 0);
185
186         if (is_bad_inode(inode))
187                 goto no_delete;
188
189         handle = start_transaction(inode);
190         if (IS_ERR(handle)) {
191                 /*
192                  * If we're going to skip the normal cleanup, we still need to
193                  * make sure that the in-core orphan linked list is properly
194                  * cleaned up.
195                  */
196                 ext4_orphan_del(NULL, inode);
197                 goto no_delete;
198         }
199
200         if (IS_SYNC(inode))
201                 handle->h_sync = 1;
202         inode->i_size = 0;
203         if (inode->i_blocks)
204                 ext4_truncate(inode);
205         /*
206          * Kill off the orphan record which ext4_truncate created.
207          * AKPM: I think this can be inside the above `if'.
208          * Note that ext4_orphan_del() has to be able to cope with the
209          * deletion of a non-existent orphan - this is because we don't
210          * know if ext4_truncate() actually created an orphan record.
211          * (Well, we could do this if we need to, but heck - it works)
212          */
213         ext4_orphan_del(handle, inode);
214         EXT4_I(inode)->i_dtime  = get_seconds();
215
216         /*
217          * One subtle ordering requirement: if anything has gone wrong
218          * (transaction abort, IO errors, whatever), then we can still
219          * do these next steps (the fs will already have been marked as
220          * having errors), but we can't free the inode if the mark_dirty
221          * fails.
222          */
223         if (ext4_mark_inode_dirty(handle, inode))
224                 /* If that failed, just do the required in-core inode clear. */
225                 clear_inode(inode);
226         else
227                 ext4_free_inode(handle, inode);
228         ext4_journal_stop(handle);
229         return;
230 no_delete:
231         clear_inode(inode);     /* We must guarantee clearing of inode... */
232 }
233
234 typedef struct {
235         __le32  *p;
236         __le32  key;
237         struct buffer_head *bh;
238 } Indirect;
239
240 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
241 {
242         p->key = *(p->p = v);
243         p->bh = bh;
244 }
245
246 static int verify_chain(Indirect *from, Indirect *to)
247 {
248         while (from <= to && from->key == *from->p)
249                 from++;
250         return (from > to);
251 }
252
253 /**
254  *      ext4_block_to_path - parse the block number into array of offsets
255  *      @inode: inode in question (we are only interested in its superblock)
256  *      @i_block: block number to be parsed
257  *      @offsets: array to store the offsets in
258  *      @boundary: set this non-zero if the referred-to block is likely to be
259  *             followed (on disk) by an indirect block.
260  *
261  *      To store the locations of a file's data ext4 uses a data structure
262  *      common to UNIX filesystems - a tree of pointers anchored in the
263  *      inode, with data blocks at the leaves and indirect blocks in
264  *      intermediate nodes.  This function translates the block number into
265  *      a path in that tree - the return value is the path length and
266  *      @offsets[n] is the offset of the pointer to the (n+1)th node in the
267  *      nth one.  If @block is out of range (negative or too large), a
268  *      warning is printed and zero is returned.
269  *
270  *      Note: the function doesn't find node addresses, so no IO is needed;
271  *      all we need is the capacity of indirect blocks (from inode->i_sb).
272  */
273
274 /*
275  * Portability note: the last comparison (check that we fit into triple
276  * indirect block) is spelled differently, because otherwise on an
277  * architecture with 32-bit longs and 8Kb pages we might get into trouble
278  * if our filesystem had 8Kb blocks. We might use long long, but that would
279  * kill us on x86. Oh, well, at least the sign propagation does not matter -
280  * i_block would have to be negative in the very beginning, so we would not
281  * get there at all.
282  */
283
284 static int ext4_block_to_path(struct inode *inode,
285                         long i_block, int offsets[4], int *boundary)
286 {
287         int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
288         int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
289         const long direct_blocks = EXT4_NDIR_BLOCKS,
290                 indirect_blocks = ptrs,
291                 double_blocks = (1 << (ptrs_bits * 2));
292         int n = 0;
293         int final = 0;
294
295         if (i_block < 0) {
296                 ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
297         } else if (i_block < direct_blocks) {
298                 offsets[n++] = i_block;
299                 final = direct_blocks;
300         } else if ((i_block -= direct_blocks) < indirect_blocks) {
301                 offsets[n++] = EXT4_IND_BLOCK;
302                 offsets[n++] = i_block;
303                 final = ptrs;
304         } else if ((i_block -= indirect_blocks) < double_blocks) {
305                 offsets[n++] = EXT4_DIND_BLOCK;
306                 offsets[n++] = i_block >> ptrs_bits;
307                 offsets[n++] = i_block & (ptrs - 1);
308                 final = ptrs;
309         } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
310                 offsets[n++] = EXT4_TIND_BLOCK;
311                 offsets[n++] = i_block >> (ptrs_bits * 2);
312                 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
313                 offsets[n++] = i_block & (ptrs - 1);
314                 final = ptrs;
315         } else {
316                 ext4_warning(inode->i_sb, "ext4_block_to_path", "block > big");
317         }
318         if (boundary)
319                 *boundary = final - 1 - (i_block & (ptrs - 1));
320         return n;
321 }
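
/*
 * Worked example (editorial, illustrative only; assumes 4KB blocks,
 * so ptrs == 1024 and EXT4_NDIR_BLOCKS == 12): logical block 5000 is
 * past the 12 direct slots and the 1024 single-indirect slots, so it
 * lands in the double-indirect tree:
 *
 *	i_block = 5000 - 12 - 1024 = 3964
 *	offsets = { EXT4_DIND_BLOCK, 3964 >> 10, 3964 & 1023 }
 *	        = { EXT4_DIND_BLOCK, 3, 892 }
 *
 * The returned depth is 3, and *boundary becomes 1023 - 892 = 131
 * blocks left before the next indirect block.
 */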
322
323 /**
324  *      ext4_get_branch - read the chain of indirect blocks leading to data
325  *      @inode: inode in question
326  *      @depth: depth of the chain (1 - direct pointer, etc.)
327  *      @offsets: offsets of pointers in inode/indirect blocks
328  *      @chain: place to store the result
329  *      @err: here we store the error value
330  *
331  *      Function fills the array of triples <key, p, bh> and returns %NULL
332  *      if everything went OK or the pointer to the last filled triple
333  *      (incomplete one) otherwise. Upon the return chain[i].key contains
334  *      the number of (i+1)-th block in the chain (as it is stored in memory,
335  *      i.e. little-endian 32-bit), chain[i].p contains the address of that
336  *      number (it points into struct inode for i==0 and into the bh->b_data
337  *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
338  *      block for i>0 and NULL for i==0. In other words, it holds the block
339  *      numbers of the chain, addresses they were taken from (and where we can
340  *      verify that chain did not change) and buffer_heads hosting these
341  *      numbers.
342  *
343  *      Function stops when it stumbles upon zero pointer (absent block)
344  *              (pointer to last triple returned, *@err == 0)
345  *      or when it gets an IO error reading an indirect block
346  *              (ditto, *@err == -EIO)
347  *      or when it notices that chain had been changed while it was reading
348  *              (ditto, *@err == -EAGAIN)
349  *      or when it reads all @depth-1 indirect blocks successfully and finds
350  *      the whole chain, all the way to the data (returns %NULL, *err == 0).
351  */
352 static Indirect *ext4_get_branch(struct inode *inode, int depth, int *offsets,
353                                  Indirect chain[4], int *err)
354 {
355         struct super_block *sb = inode->i_sb;
356         Indirect *p = chain;
357         struct buffer_head *bh;
358
359         *err = 0;
360         /* i_data is not going away, no lock needed */
361         add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
362         if (!p->key)
363                 goto no_block;
364         while (--depth) {
365                 bh = sb_bread(sb, le32_to_cpu(p->key));
366                 if (!bh)
367                         goto failure;
368                 /* Reader: pointers */
369                 if (!verify_chain(chain, p))
370                         goto changed;
371                 add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
372                 /* Reader: end */
373                 if (!p->key)
374                         goto no_block;
375         }
376         return NULL;
377
378 changed:
379         brelse(bh);
380         *err = -EAGAIN;
381         goto no_block;
382 failure:
383         *err = -EIO;
384 no_block:
385         return p;
386 }
387
388 /**
389  *      ext4_find_near - find a place for allocation with sufficient locality
390  *      @inode: owner
391  *      @ind: descriptor of indirect block.
392  *
393  *      This function returns the preferred place for block allocation.
394  *      It is used when the heuristic for sequential allocation fails.
395  *      Rules are:
396  *        + if there is a block to the left of our position - allocate near it.
397  *        + if pointer will live in indirect block - allocate near that block.
398  *        + if pointer will live in inode - allocate in the same
399  *          cylinder group.
400  *
401  * In the latter case we colour the starting block by the caller's PID to
402  * prevent it from clashing with concurrent allocations for a different inode
403  * in the same block group.   The PID is used here so that functionally related
404  * files will be close-by on-disk.
405  *
406  *      Caller must make sure that @ind is valid and will stay that way.
407  */
408 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
409 {
410         struct ext4_inode_info *ei = EXT4_I(inode);
411         __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
412         __le32 *p;
413         ext4_fsblk_t bg_start;
414         ext4_grpblk_t colour;
415
416         /* Try to find previous block */
417         for (p = ind->p - 1; p >= start; p--) {
418                 if (*p)
419                         return le32_to_cpu(*p);
420         }
421
422         /* No such thing, so let's try location of indirect block */
423         if (ind->bh)
424                 return ind->bh->b_blocknr;
425
426         /*
427          * Is it going to be referred to from the inode itself? OK, just put it
428          * into the same cylinder group then.
429          */
430         bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
431         colour = (current->pid % 16) *
432                         (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
433         return bg_start + colour;
434 }
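
/*
 * Example of the colouring above (editorial, illustrative only;
 * assumes 32768 blocks per group, the usual value for 4KB blocks):
 * a task with pid 4242 gets
 *
 *	colour = (4242 % 16) * (32768 / 16) = 2 * 2048 = 4096
 *
 * so its allocations start 4096 blocks into the group, away from the
 * starting blocks handed to tasks with other pids.
 */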
435
436 /**
437  *      ext4_find_goal - find a preferred place for allocation.
438  *      @inode: owner
439  *      @block:  block we want
440  *      @chain:  chain of indirect blocks
441  *      @partial: pointer to the last triple within a chain
442  *
443  *      Returns the preferred place for block allocation: the block after
444  *      the last one allocated if we are allocating sequentially, otherwise
445  *      the locality-based choice made by ext4_find_near().
446  */
447
448 static ext4_fsblk_t ext4_find_goal(struct inode *inode, long block,
449                 Indirect chain[4], Indirect *partial)
450 {
451         struct ext4_block_alloc_info *block_i;
452
453         block_i =  EXT4_I(inode)->i_block_alloc_info;
454
455         /*
456          * try the heuristic for sequential allocation,
457          * failing that at least try to get decent locality.
458          */
459         if (block_i && (block == block_i->last_alloc_logical_block + 1)
460                 && (block_i->last_alloc_physical_block != 0)) {
461                 return block_i->last_alloc_physical_block + 1;
462         }
463
464         return ext4_find_near(inode, partial);
465 }
466
467 /**
468  *      ext4_blks_to_allocate: Look up the block map and count the number
469  *      of direct blocks that need to be allocated for the given branch.
470  *
471  *      @branch: chain of indirect blocks
472  *      @k: number of blocks needed for indirect blocks
473  *      @blks: number of data blocks to be mapped.
474  *      @blocks_to_boundary:  the offset in the indirect block
475  *
476  *      Returns the total number of blocks to be allocated, including the
477  *      direct and indirect blocks.
478  */
479 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
480                 int blocks_to_boundary)
481 {
482         unsigned long count = 0;
483
484         /*
485          * Simple case: the [t,d]indirect block(s) have not been allocated
486          * yet, so it's clear that no blocks on that path have been allocated
487          */
488         if (k > 0) {
489                 /* right now we don't handle cross boundary allocation */
490                 if (blks < blocks_to_boundary + 1)
491                         count += blks;
492                 else
493                         count += blocks_to_boundary + 1;
494                 return count;
495         }
496
497         count++;
498         while (count < blks && count <= blocks_to_boundary &&
499                 le32_to_cpu(*(branch[0].p + count)) == 0) {
500                 count++;
501         }
502         return count;
503 }
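
/*
 * Example (editorial, illustrative only): with k == 0, blks == 8 and
 * blocks_to_boundary == 5, and all of the following map entries still
 * zero, the loop above counts min(blks, blocks_to_boundary + 1) == 6
 * blocks, stopping at the indirect-block boundary.
 */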
504
505 /**
506  *      ext4_alloc_blocks: allocate multiple blocks needed for a branch
507  *      @indirect_blks: the number of blocks that need to be allocated
508  *                      for indirect blocks
509  *
510  *      @new_blocks: on return it will store the new block numbers for
511  *      the indirect blocks (if needed) and the first direct block,
512  *      @blks:  on return it will store the total number of allocated
513  *              direct blocks
514  */
515 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
516                         ext4_fsblk_t goal, int indirect_blks, int blks,
517                         ext4_fsblk_t new_blocks[4], int *err)
518 {
519         int target, i;
520         unsigned long count = 0;
521         int index = 0;
522         ext4_fsblk_t current_block = 0;
523         int ret = 0;
524
525         /*
526          * Here we try to allocate the requested multiple blocks at once,
527          * on a best-effort basis.
528          * To build a branch, we need to allocate blocks for
529          * the indirect blocks (if not allocated yet), and at least
530          * the first direct block of this branch.  That's the
531          * minimum number of blocks we need to allocate (required).
532          */
533         target = blks + indirect_blks;
534
535         while (1) {
536                 count = target;
537                 /* allocating blocks for indirect blocks and direct blocks */
538                 current_block = ext4_new_blocks(handle, inode, goal, &count, err);
539                 if (*err)
540                         goto failed_out;
541
542                 target -= count;
543                 /* allocate blocks for indirect blocks */
544                 while (index < indirect_blks && count) {
545                         new_blocks[index++] = current_block++;
546                         count--;
547                 }
548
549                 if (count > 0)
550                         break;
551         }
552
553         /* save the new block number for the first direct block */
554         new_blocks[index] = current_block;
555
556         /* total number of blocks allocated for direct blocks */
557         ret = count;
558         *err = 0;
559         return ret;
560 failed_out:
561         for (i = 0; i < index; i++)
562                 ext4_free_blocks(handle, inode, new_blocks[i], 1);
563         return ret;
564 }
565
566 /**
567  *      ext4_alloc_branch - allocate and set up a chain of blocks.
568  *      @inode: owner
569  *      @indirect_blks: number of allocated indirect blocks
570  *      @blks: number of allocated direct blocks
571  *      @offsets: offsets (in the blocks) to store the pointers to next.
572  *      @branch: place to store the chain in.
573  *
574  *      This function allocates blocks, zeroes out all but the last one,
575  *      links them into a chain and (if we are synchronous) writes them to disk.
576  *      In other words, it prepares a branch that can be spliced onto the
577  *      inode. It stores the information about that chain in the branch[], in
578  *      the same format as ext4_get_branch() would do. We call it after we
579  *      have read the existing part of the chain; partial points to the last
580  *      triple of that (the one with zero ->key). Upon exit we have the same
581  *      picture as after a successful ext4_get_block(), except that in one
582  *      place the chain is disconnected - *branch->p is still zero (we did not
583  *      set the last link), but branch->key contains the number that should
584  *      be placed into *branch->p to fill that gap.
585  *
586  *      If allocation fails we free all blocks we've allocated (and forget
587  *      their buffer_heads) and return the error value from the failed
588  *      ext4_alloc_blocks() (normally -ENOSPC). Otherwise we set the chain
589  *      as described above and return 0.
590  */
591 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
592                         int indirect_blks, int *blks, ext4_fsblk_t goal,
593                         int *offsets, Indirect *branch)
594 {
595         int blocksize = inode->i_sb->s_blocksize;
596         int i, n = 0;
597         int err = 0;
598         struct buffer_head *bh;
599         int num;
600         ext4_fsblk_t new_blocks[4];
601         ext4_fsblk_t current_block;
602
603         num = ext4_alloc_blocks(handle, inode, goal, indirect_blks,
604                                 *blks, new_blocks, &err);
605         if (err)
606                 return err;
607
608         branch[0].key = cpu_to_le32(new_blocks[0]);
609         /*
610          * The metadata blocks and data blocks have now been allocated.
611          */
612         for (n = 1; n <= indirect_blks;  n++) {
613                 /*
614                  * Get buffer_head for parent block, zero it out
615                  * and set the pointer to the new one, then send
616                  * parent to disk.
617                  */
618                 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
619                 branch[n].bh = bh;
620                 lock_buffer(bh);
621                 BUFFER_TRACE(bh, "call get_create_access");
622                 err = ext4_journal_get_create_access(handle, bh);
623                 if (err) {
624                         unlock_buffer(bh);
625                         brelse(bh);
626                         goto failed;
627                 }
628
629                 memset(bh->b_data, 0, blocksize);
630                 branch[n].p = (__le32 *) bh->b_data + offsets[n];
631                 branch[n].key = cpu_to_le32(new_blocks[n]);
632                 *branch[n].p = branch[n].key;
633                 if (n == indirect_blks) {
634                         current_block = new_blocks[n];
635                         /*
636                          * End of chain: update the last new metablock of
637                          * the chain to point to the newly allocated
638                          * data block numbers
639                          */
640                         for (i = 1; i < num; i++)
641                                 *(branch[n].p + i) = cpu_to_le32(++current_block);
642                 }
643                 BUFFER_TRACE(bh, "marking uptodate");
644                 set_buffer_uptodate(bh);
645                 unlock_buffer(bh);
646
647                 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
648                 err = ext4_journal_dirty_metadata(handle, bh);
649                 if (err)
650                         goto failed;
651         }
652         *blks = num;
653         return err;
654 failed:
655         /* Allocation failed, free what we already allocated */
656         for (i = 1; i <= n ; i++) {
657                 BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
658                 ext4_journal_forget(handle, branch[i].bh);
659         }
660         for (i = 0; i < indirect_blks; i++)
661                 ext4_free_blocks(handle, inode, new_blocks[i], 1);
662
663         ext4_free_blocks(handle, inode, new_blocks[i], num);
664
665         return err;
666 }
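
/*
 * Shape of the result (editorial sketch, not from the original file),
 * for indirect_blks == 2:
 *
 *	branch[0].key  number of the first new indirect block; the link
 *	               *branch[0].p into the old chain is still zero
 *	               (ext4_splice_branch() will set it)
 *	branch[1]      buffer of that indirect block, one slot pointing
 *	               at the second new indirect block
 *	branch[2]      buffer of the second indirect block, whose first
 *	               *blks slots hold the new data block numbers
 */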
667
668 /**
669  * ext4_splice_branch - splice the allocated branch onto inode.
670  * @inode: owner
671  * @block: (logical) number of block we are adding
672  * @chain: chain of indirect blocks (with a missing link - see
673  *      ext4_alloc_branch)
674  * @where: location of missing link
675  * @num:   number of indirect blocks we are adding
676  * @blks:  number of direct blocks we are adding
677  *
678  * This function fills the missing link and does all housekeeping needed in
679  * inode (->i_blocks, etc.). In case of success we end up with the full
680  * chain to new block and return 0.
681  */
682 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
683                         long block, Indirect *where, int num, int blks)
684 {
685         int i;
686         int err = 0;
687         struct ext4_block_alloc_info *block_i;
688         ext4_fsblk_t current_block;
689
690         block_i = EXT4_I(inode)->i_block_alloc_info;
691         /*
692          * If we're splicing into a [td]indirect block (as opposed to the
693          * inode) then we need to get write access to the [td]indirect block
694          * before the splice.
695          */
696         if (where->bh) {
697                 BUFFER_TRACE(where->bh, "get_write_access");
698                 err = ext4_journal_get_write_access(handle, where->bh);
699                 if (err)
700                         goto err_out;
701         }
702         /* That's it */
703
704         *where->p = where->key;
705
706         /*
707          * Update the host buffer_head or inode to point to the remaining
708          * just-allocated direct blocks
709          */
710         if (num == 0 && blks > 1) {
711                 current_block = le32_to_cpu(where->key) + 1;
712                 for (i = 1; i < blks; i++)
713                         *(where->p + i ) = cpu_to_le32(current_block++);
714         }
715
716         /*
717          * update the most recently allocated logical & physical block
718          * in i_block_alloc_info, to help find the proper goal block for the
719          * next allocation
720          */
721         if (block_i) {
722                 block_i->last_alloc_logical_block = block + blks - 1;
723                 block_i->last_alloc_physical_block =
724                                 le32_to_cpu(where[num].key) + blks - 1;
725         }
726
727         /* We are done with atomic stuff, now do the rest of housekeeping */
728
729         inode->i_ctime = ext4_current_time(inode);
730         ext4_mark_inode_dirty(handle, inode);
731
732         /* had we spliced it onto indirect block? */
733         if (where->bh) {
734                 /*
735                  * If we spliced it onto an indirect block, we haven't
736                  * altered the inode.  Note however that if it is being spliced
737                  * onto an indirect block at the very end of the file (the
738                  * file is growing) then we *will* alter the inode to reflect
739                  * the new i_size.  But that is not done here - it is done in
740                  * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
741                  */
742                 jbd_debug(5, "splicing indirect only\n");
743                 BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
744                 err = ext4_journal_dirty_metadata(handle, where->bh);
745                 if (err)
746                         goto err_out;
747         } else {
748                 /*
749                  * OK, we spliced it into the inode itself on a direct block.
750                  * Inode was dirtied above.
751                  */
752                 jbd_debug(5, "splicing direct\n");
753         }
754         return err;
755
756 err_out:
757         for (i = 1; i <= num; i++) {
758                 BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
759                 ext4_journal_forget(handle, where[i].bh);
760                 ext4_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
761         }
762         ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
763
764         return err;
765 }
766
767 /*
768  * Allocation strategy is simple: if we have to allocate something, we will
769  * have to go the whole way to leaf. So let's do it before attaching anything
770  * to tree, set linkage between the newborn blocks, write them if sync is
771  * required, recheck the path, free and repeat if check fails, otherwise
772  * set the last missing link (that will protect us from any truncate-generated
773  * removals - all blocks on the path are immune now) and possibly force the
774  * write on the parent block.
775  * That has a nice additional property: no special recovery from the failed
776  * allocations is needed - we simply release blocks and do not touch anything
777  * reachable from inode.
778  *
779  * `handle' can be NULL if create == 0.
780  *
781  * The BKL may not be held on entry here.  Be sure to take it early.
782  * return > 0, # of blocks mapped or allocated.
783  * return = 0, if plain lookup failed.
784  * return < 0, error case.
785  */
786 int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
787                 sector_t iblock, unsigned long maxblocks,
788                 struct buffer_head *bh_result,
789                 int create, int extend_disksize)
790 {
791         int err = -EIO;
792         int offsets[4];
793         Indirect chain[4];
794         Indirect *partial;
795         ext4_fsblk_t goal;
796         int indirect_blks;
797         int blocks_to_boundary = 0;
798         int depth;
799         struct ext4_inode_info *ei = EXT4_I(inode);
800         int count = 0;
801         ext4_fsblk_t first_block = 0;
802
803
804         J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
805         J_ASSERT(handle != NULL || create == 0);
806         depth = ext4_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
807
808         if (depth == 0)
809                 goto out;
810
811         partial = ext4_get_branch(inode, depth, offsets, chain, &err);
812
813         /* Simplest case - block found, no allocation needed */
814         if (!partial) {
815                 first_block = le32_to_cpu(chain[depth - 1].key);
816                 clear_buffer_new(bh_result);
817                 count++;
818                 /* map more blocks */
819                 while (count < maxblocks && count <= blocks_to_boundary) {
820                         ext4_fsblk_t blk;
821
822                         if (!verify_chain(chain, partial)) {
823                                 /*
824                                  * Indirect block might be removed by
825                                  * truncate while we were reading it.
826                                  * Handling of that case: forget what we've
827                                  * got now and flag the error as EAGAIN, so
828                                  * the chain will be reread.
829                                  */
830                                 err = -EAGAIN;
831                                 count = 0;
832                                 break;
833                         }
834                         blk = le32_to_cpu(*(chain[depth-1].p + count));
835
836                         if (blk == first_block + count)
837                                 count++;
838                         else
839                                 break;
840                 }
841                 if (err != -EAGAIN)
842                         goto got_it;
843         }
844
845         /* Next simple case - plain lookup or failed read of indirect block */
846         if (!create || err == -EIO)
847                 goto cleanup;
848
849         mutex_lock(&ei->truncate_mutex);
850
851         /*
852          * If the indirect block is missing while we are reading
853          * the chain (ext4_get_branch() returned -EAGAIN), or if the
854          * chain has been changed after we grabbed the semaphore
855          * (either because another process truncated this branch, or
856          * another get_block allocated this branch), re-read the chain
857          * to see if the requested block has been allocated or not.
858          *
859          * Since we already block the truncate/other get_block
860          * at this point, we will have the current copy of the chain when we
861          * splice the branch into the tree.
862          */
863         if (err == -EAGAIN || !verify_chain(chain, partial)) {
864                 while (partial > chain) {
865                         brelse(partial->bh);
866                         partial--;
867                 }
868                 partial = ext4_get_branch(inode, depth, offsets, chain, &err);
869                 if (!partial) {
870                         count++;
871                         mutex_unlock(&ei->truncate_mutex);
872                         if (err)
873                                 goto cleanup;
874                         clear_buffer_new(bh_result);
875                         goto got_it;
876                 }
877         }
878
879         /*
880          * Okay, we need to do block allocation.  Lazily initialize the block
881          * allocation info here if necessary.
882          */
883         if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
884                 ext4_init_block_alloc_info(inode);
885
886         goal = ext4_find_goal(inode, iblock, chain, partial);
887
888         /* the number of blocks we need to allocate for [d,t]indirect blocks */
889         indirect_blks = (chain + depth) - partial - 1;
890
891         /*
892          * Next look up the indirect map to count the total number of
893          * direct blocks to allocate for this branch.
894          */
895         count = ext4_blks_to_allocate(partial, indirect_blks,
896                                         maxblocks, blocks_to_boundary);
897         /*
898          * Block out ext4_truncate while we alter the tree
899          */
900         err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal,
901                                 offsets + (partial - chain), partial);
902
903         /*
904          * The ext4_splice_branch call will free and forget any buffers
905          * on the new chain if there is a failure, but that risks using
906          * up transaction credits, especially for bitmaps where the
907          * credits cannot be returned.  Can we handle this somehow?  We
908          * may need to return -EAGAIN upwards in the worst case.  --sct
909          */
910         if (!err)
911                 err = ext4_splice_branch(handle, inode, iblock,
912                                         partial, indirect_blks, count);
913         /*
914          * i_disksize growing is protected by truncate_mutex.  Don't forget to
915          * protect it if you're about to implement concurrent
916          * ext4_get_block() -bzzz
917          */
918         if (!err && extend_disksize && inode->i_size > ei->i_disksize)
919                 ei->i_disksize = inode->i_size;
920         mutex_unlock(&ei->truncate_mutex);
921         if (err)
922                 goto cleanup;
923
924         set_buffer_new(bh_result);
925 got_it:
926         map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
927         if (count > blocks_to_boundary)
928                 set_buffer_boundary(bh_result);
929         err = count;
930         /* Clean up and exit */
931         partial = chain + depth - 1;    /* the whole chain */
932 cleanup:
933         while (partial > chain) {
934                 BUFFER_TRACE(partial->bh, "call brelse");
935                 brelse(partial->bh);
936                 partial--;
937         }
938         BUFFER_TRACE(bh_result, "returned");
939 out:
940         return err;
941 }
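
/*
 * Illustrative call (editorial; the variable names are hypothetical):
 * a plain lookup of one block with no allocation, as a read would
 * issue it.  handle may be NULL because create == 0:
 *
 *	err = ext4_get_blocks_handle(NULL, inode, iblock, 1,
 *				     bh_result, 0, 0);
 *
 * err > 0 is the number of blocks mapped, 0 means a hole, and a
 * negative value is an error, per the contract documented above.
 */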
942
943 #define DIO_CREDITS (EXT4_RESERVE_TRANS_BLOCKS + 32)
944
945 static int ext4_get_block(struct inode *inode, sector_t iblock,
946                         struct buffer_head *bh_result, int create)
947 {
948         handle_t *handle = ext4_journal_current_handle();
949         int ret = 0;
950         unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
951
952         if (!create)
953                 goto get_block;         /* A read */
954
955         if (max_blocks == 1)
956                 goto get_block;         /* A single block get */
957
958         if (handle->h_transaction->t_state == T_LOCKED) {
959                 /*
960                  * Huge direct-io writes can hold off commits for long
961                  * periods of time.  Let this commit run.
962                  */
963                 ext4_journal_stop(handle);
964                 handle = ext4_journal_start(inode, DIO_CREDITS);
965                 if (IS_ERR(handle))
966                         ret = PTR_ERR(handle);
967                 goto get_block;
968         }
969
970         if (handle->h_buffer_credits <= EXT4_RESERVE_TRANS_BLOCKS) {
971                 /*
972                  * Getting low on buffer credits...
973                  */
974                 ret = ext4_journal_extend(handle, DIO_CREDITS);
975                 if (ret > 0) {
976                         /*
977                          * Couldn't extend the transaction.  Start a new one.
978                          */
979                         ret = ext4_journal_restart(handle, DIO_CREDITS);
980                 }
981         }
982
983 get_block:
984         if (ret == 0) {
985                 ret = ext4_get_blocks_wrap(handle, inode, iblock,
986                                         max_blocks, bh_result, create, 0);
987                 if (ret > 0) {
988                         bh_result->b_size = (ret << inode->i_blkbits);
989                         ret = 0;
990                 }
991         }
992         return ret;
993 }
994
995 /*
996  * `handle' can be NULL if create is zero
997  */
998 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
999                                 long block, int create, int *errp)
1000 {
1001         struct buffer_head dummy;
1002         int fatal = 0, err;
1003
1004         J_ASSERT(handle != NULL || create == 0);
1005
1006         dummy.b_state = 0;
1007         dummy.b_blocknr = -1000;
1008         buffer_trace_init(&dummy.b_history);
1009         err = ext4_get_blocks_wrap(handle, inode, block, 1,
1010                                         &dummy, create, 1);
1011         /*
1012          * ext4_get_blocks_handle() returns the number of blocks
1013          * mapped; 0 in the case of a hole.
1014          */
1015         if (err > 0) {
1016                 if (err > 1)
1017                         WARN_ON(1);
1018                 err = 0;
1019         }
1020         *errp = err;
1021         if (!err && buffer_mapped(&dummy)) {
1022                 struct buffer_head *bh;
1023                 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1024                 if (!bh) {
1025                         *errp = -EIO;
1026                         goto err;
1027                 }
1028                 if (buffer_new(&dummy)) {
1029                         J_ASSERT(create != 0);
1030                         J_ASSERT(handle != NULL);
1031
1032                         /*
1033                          * Now that we do not always journal data, we should
1034                          * keep in mind whether this should always journal the
1035                          * new buffer as metadata.  For now, regular file
1036                          * writes use ext4_get_block instead, so it's not a
1037                          * problem.
1038                          */
1039                         lock_buffer(bh);
1040                         BUFFER_TRACE(bh, "call get_create_access");
1041                         fatal = ext4_journal_get_create_access(handle, bh);
1042                         if (!fatal && !buffer_uptodate(bh)) {
1043                                 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1044                                 set_buffer_uptodate(bh);
1045                         }
1046                         unlock_buffer(bh);
1047                         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
1048                         err = ext4_journal_dirty_metadata(handle, bh);
1049                         if (!fatal)
1050                                 fatal = err;
1051                 } else {
1052                         BUFFER_TRACE(bh, "not a new buffer");
1053                 }
1054                 if (fatal) {
1055                         *errp = fatal;
1056                         brelse(bh);
1057                         bh = NULL;
1058                 }
1059                 return bh;
1060         }
1061 err:
1062         return NULL;
1063 }
1064
1065 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1066                                int block, int create, int *err)
1067 {
1068         struct buffer_head * bh;
1069
1070         bh = ext4_getblk(handle, inode, block, create, err);
1071         if (!bh)
1072                 return bh;
1073         if (buffer_uptodate(bh))
1074                 return bh;
1075         ll_rw_block(READ_META, 1, &bh);
1076         wait_on_buffer(bh);
1077         if (buffer_uptodate(bh))
1078                 return bh;
1079         put_bh(bh);
1080         *err = -EIO;
1081         return NULL;
1082 }
1083
1084 static int walk_page_buffers(   handle_t *handle,
1085                                 struct buffer_head *head,
1086                                 unsigned from,
1087                                 unsigned to,
1088                                 int *partial,
1089                                 int (*fn)(      handle_t *handle,
1090                                                 struct buffer_head *bh))
1091 {
1092         struct buffer_head *bh;
1093         unsigned block_start, block_end;
1094         unsigned blocksize = head->b_size;
1095         int err, ret = 0;
1096         struct buffer_head *next;
1097
1098         for (   bh = head, block_start = 0;
1099                 ret == 0 && (bh != head || !block_start);
1100                 block_start = block_end, bh = next)
1101         {
1102                 next = bh->b_this_page;
1103                 block_end = block_start + blocksize;
1104                 if (block_end <= from || block_start >= to) {
1105                         if (partial && !buffer_uptodate(bh))
1106                                 *partial = 1;
1107                         continue;
1108                 }
1109                 err = (*fn)(handle, bh);
1110                 if (!ret)
1111                         ret = err;
1112         }
1113         return ret;
1114 }
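
/*
 * Example call (editorial note; this is how the write path below uses
 * the walker): request journal write access for every buffer that
 * overlaps [from, to) on a page:
 *
 *	walk_page_buffers(handle, page_buffers(page), from, to,
 *			  NULL, do_journal_get_write_access);
 */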
1115
1116 /*
1117  * To preserve ordering, it is essential that the hole instantiation and
1118  * the data write be encapsulated in a single transaction.  We cannot
1119  * close off a transaction and start a new one between the ext4_get_block()
1120  * and the commit_write().  So doing the jbd2_journal_start at the start of
1121  * prepare_write() is the right place.
1122  *
1123  * Also, this function can nest inside ext4_writepage() ->
1124  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1125  * has generated enough buffer credits to do the whole page.  So we won't
1126  * block on the journal in that case, which is good, because the caller may
1127  * be PF_MEMALLOC.
1128  *
1129  * By accident, ext4 can be reentered when a transaction is open via
1130  * quota file writes.  If we were to commit the transaction while thus
1131  * reentered, there can be a deadlock - we would be holding a quota
1132  * lock, and the commit would never complete if another thread had a
1133  * transaction open and was blocking on the quota lock - a ranking
1134  * violation.
1135  *
1136  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1137  * will _not_ run commit under these circumstances because handle->h_ref
1138  * is elevated.  We'll still have enough credits for the tiny quotafile
1139  * write.
1140  */
1141 static int do_journal_get_write_access(handle_t *handle,
1142                                         struct buffer_head *bh)
1143 {
1144         if (!buffer_mapped(bh) || buffer_freed(bh))
1145                 return 0;
1146         return ext4_journal_get_write_access(handle, bh);
1147 }
1148
1149 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1150                                 loff_t pos, unsigned len, unsigned flags,
1151                                 struct page **pagep, void **fsdata)
1152 {
1153         struct inode *inode = mapping->host;
1154         int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1155         handle_t *handle;
1156         int retries = 0;
1157         struct page *page;
1158         pgoff_t index;
1159         unsigned from, to;
1160
1161         index = pos >> PAGE_CACHE_SHIFT;
1162         from = pos & (PAGE_CACHE_SIZE - 1);
1163         to = from + len;
1164
1165 retry:
1166         page = __grab_cache_page(mapping, index);
1167         if (!page)
1168                 return -ENOMEM;
1169         *pagep = page;
1170
1171         handle = ext4_journal_start(inode, needed_blocks);
1172         if (IS_ERR(handle)) {
1173                 unlock_page(page);
1174                 page_cache_release(page);
1175                 ret = PTR_ERR(handle);
1176                 goto out;
1177         }
1178
1179         ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1180                                                         ext4_get_block);
1181
1182         if (!ret && ext4_should_journal_data(inode)) {
1183                 ret = walk_page_buffers(handle, page_buffers(page),
1184                                 from, to, NULL, do_journal_get_write_access);
1185         }
1186
1187         if (ret) {
1188                 ext4_journal_stop(handle);
1189                 unlock_page(page);
1190                 page_cache_release(page);
1191         }
1192
1193         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1194                 goto retry;
1195 out:
1196         return ret;
1197 }
1198
1199 int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
1200 {
1201         int err = jbd2_journal_dirty_data(handle, bh);
1202         if (err)
1203                 ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
1204                                                 bh, handle, err);
1205         return err;
1206 }
1207
1208 /* For write_end() in data=journal mode */
1209 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1210 {
1211         if (!buffer_mapped(bh) || buffer_freed(bh))
1212                 return 0;
1213         set_buffer_uptodate(bh);
1214         return ext4_journal_dirty_metadata(handle, bh);
1215 }
1216
1217 /*
1218  * Generic write_end handler for ordered and writeback ext4 journal modes.
1219  * We can't use generic_write_end, because that unlocks the page and we need to
1220  * unlock the page after ext4_journal_stop, but ext4_journal_stop must run
1221  * after block_write_end.
1222  */
1223 static int ext4_generic_write_end(struct file *file,
1224                                 struct address_space *mapping,
1225                                 loff_t pos, unsigned len, unsigned copied,
1226                                 struct page *page, void *fsdata)
1227 {
1228         struct inode *inode = file->f_mapping->host;
1229
1230         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
1231
1232         if (pos+copied > inode->i_size) {
1233                 i_size_write(inode, pos+copied);
1234                 mark_inode_dirty(inode);
1235         }
1236
1237         return copied;
1238 }
1239
1240 /*
1241  * We need to pick up the new inode size which generic_commit_write gave us.
1242  * `file' can be NULL - eg, when called from page_symlink().
1243  *
1244  * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1245  * buffers are managed internally.
1246  */
1247 static int ext4_ordered_write_end(struct file *file,
1248                                 struct address_space *mapping,
1249                                 loff_t pos, unsigned len, unsigned copied,
1250                                 struct page *page, void *fsdata)
1251 {
1252         handle_t *handle = ext4_journal_current_handle();
1253         struct inode *inode = file->f_mapping->host;
1254         unsigned from, to;
1255         int ret = 0, ret2;
1256
1257         from = pos & (PAGE_CACHE_SIZE - 1);
1258         to = from + len;
1259
1260         ret = walk_page_buffers(handle, page_buffers(page),
1261                 from, to, NULL, ext4_journal_dirty_data);
1262
1263         if (ret == 0) {
1264                 /*
1265                  * generic_write_end() will run mark_inode_dirty() if i_size
1266                  * changes.  So let's piggyback the i_disksize mark_inode_dirty
1267                  * into that.
1268                  */
1269                 loff_t new_i_size;
1270
1271                 new_i_size = pos + copied;
1272                 if (new_i_size > EXT4_I(inode)->i_disksize)
1273                         EXT4_I(inode)->i_disksize = new_i_size;
1274                 copied = ext4_generic_write_end(file, mapping, pos, len, copied,
1275                                                         page, fsdata);
1276                 if (copied < 0)
1277                         ret = copied;
1278         }
1279         ret2 = ext4_journal_stop(handle);
1280         if (!ret)
1281                 ret = ret2;
1282         unlock_page(page);
1283         page_cache_release(page);
1284
1285         return ret ? ret : copied;
1286 }
1287
1288 static int ext4_writeback_write_end(struct file *file,
1289                                 struct address_space *mapping,
1290                                 loff_t pos, unsigned len, unsigned copied,
1291                                 struct page *page, void *fsdata)
1292 {
1293         handle_t *handle = ext4_journal_current_handle();
1294         struct inode *inode = file->f_mapping->host;
1295         int ret = 0, ret2;
1296         loff_t new_i_size;
1297
1298         new_i_size = pos + copied;
1299         if (new_i_size > EXT4_I(inode)->i_disksize)
1300                 EXT4_I(inode)->i_disksize = new_i_size;
1301
1302         copied = ext4_generic_write_end(file, mapping, pos, len, copied,
1303                                                         page, fsdata);
1304         if (copied < 0)
1305                 ret = copied;
1306
1307         ret2 = ext4_journal_stop(handle);
1308         if (!ret)
1309                 ret = ret2;
1310         unlock_page(page);
1311         page_cache_release(page);
1312
1313         return ret ? ret : copied;
1314 }
1315
1316 static int ext4_journalled_write_end(struct file *file,
1317                                 struct address_space *mapping,
1318                                 loff_t pos, unsigned len, unsigned copied,
1319                                 struct page *page, void *fsdata)
1320 {
1321         handle_t *handle = ext4_journal_current_handle();
1322         struct inode *inode = mapping->host;
1323         int ret = 0, ret2;
1324         int partial = 0;
1325         unsigned from, to;
1326
1327         from = pos & (PAGE_CACHE_SIZE - 1);
1328         to = from + len;
1329
1330         if (copied < len) {
1331                 if (!PageUptodate(page))
1332                         copied = 0;
1333                 page_zero_new_buffers(page, from+copied, to);
1334         }
1335
1336         ret = walk_page_buffers(handle, page_buffers(page), from,
1337                                 to, &partial, write_end_fn);
1338         if (!partial)
1339                 SetPageUptodate(page);
1340         if (pos+copied > inode->i_size)
1341                 i_size_write(inode, pos+copied);
1342         EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1343         if (inode->i_size > EXT4_I(inode)->i_disksize) {
1344                 EXT4_I(inode)->i_disksize = inode->i_size;
1345                 ret2 = ext4_mark_inode_dirty(handle, inode);
1346                 if (!ret)
1347                         ret = ret2;
1348         }
1349
1350         ret2 = ext4_journal_stop(handle);
1351         if (!ret)
1352                 ret = ret2;
1353         unlock_page(page);
1354         page_cache_release(page);
1355
1356         return ret ? ret : copied;
1357 }
1358
1359 /*
1360  * bmap() is special.  It gets used by applications such as lilo and by
1361  * the swapper to find the on-disk block of a specific piece of data.
1362  *
1363  * Naturally, this is dangerous if the block concerned is still in the
1364  * journal.  If somebody makes a swapfile on an ext4 data-journaling
1365  * filesystem and enables swap, then they may get a nasty shock when the
1366  * data getting swapped to that swapfile suddenly gets overwritten by
1367  * the original zeros written out previously to the journal and
1368  * awaiting writeback in the kernel's buffer cache.
1369  *
1370  * So, if we see any bmap calls here on a modified, data-journaled file,
1371  * take extra steps to flush any blocks which might be in the cache.
1372  */
1373 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
1374 {
1375         struct inode *inode = mapping->host;
1376         journal_t *journal;
1377         int err;
1378
1379         if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
1380                 /*
1381                  * This is a REALLY heavyweight approach, but the use of
1382                  * bmap on dirty files is expected to be extremely rare:
1383                  * only if we run lilo or swapon on a freshly made file
1384                  * do we expect this to happen.
1385                  *
1386                  * (bmap requires CAP_SYS_RAWIO so this does not
1387                  * represent an unprivileged user DOS attack --- we'd be
1388                  * in trouble if mortal users could trigger this path at
1389                  * will.)
1390                  *
1391                  * NB. EXT4_STATE_JDATA is not set on files other than
1392                  * regular files.  If somebody wants to bmap a directory
1393                  * or symlink and gets confused because the buffer
1394                  * hasn't yet been flushed to disk, they deserve
1395                  * everything they get.
1396                  */
1397
1398                 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
1399                 journal = EXT4_JOURNAL(inode);
1400                 jbd2_journal_lock_updates(journal);
1401                 err = jbd2_journal_flush(journal);
1402                 jbd2_journal_unlock_updates(journal);
1403
1404                 if (err)
1405                         return 0;
1406         }
1407
1408         return generic_block_bmap(mapping, block, ext4_get_block);
1409 }
1410
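     /*
      * walk_page_buffers() callbacks for ext4_ordered_writepage(): bget_one()
      * takes an extra reference on each buffer_head and bput_one() drops it
      * again, so the buffers stay pinned after block_write_full_page() has
      * unlocked the page.
      */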
1411 static int bget_one(handle_t *handle, struct buffer_head *bh)
1412 {
1413         get_bh(bh);
1414         return 0;
1415 }
1416
1417 static int bput_one(handle_t *handle, struct buffer_head *bh)
1418 {
1419         put_bh(bh);
1420         return 0;
1421 }
1422
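     /*
      * walk_page_buffers() callback for ordered-mode writepage: file each
      * mapped buffer on the current transaction's ordered-data list so it
      * gets written out before the transaction commits.  Unmapped buffers
      * (holes) need no journalling and are skipped.
      */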
1423 static int jbd2_journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
1424 {
1425         if (buffer_mapped(bh))
1426                 return ext4_journal_dirty_data(handle, bh);
1427         return 0;
1428 }
1429
1430 /*
1431  * Note that we always start a transaction even if we're not journalling
1432  * data.  This is to preserve ordering: any hole instantiation within
1433  * __block_write_full_page -> ext4_get_block() should be journalled
1434  * along with the data so we don't crash and then get metadata which
1435  * refers to old data.
1436  *
1437  * In all journalling modes block_write_full_page() will start the I/O.
1438  *
1439  * Problem:
1440  *
1441  *      ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1442  *              ext4_writepage()
1443  *
1444  * Similar for:
1445  *
1446  *      ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1447  *
1448  * Same applies to ext4_get_block().  We will deadlock on various things like
1449  * lock_journal and i_truncate_mutex.
1450  *
1451  * Setting PF_MEMALLOC here doesn't work - too many internal memory
1452  * allocations fail.
1453  *
1454  * 16May01: If we're reentered then journal_current_handle() will be
1455  *          non-zero. We simply *return*.
1456  *
1457  * 1 July 2001: @@@ FIXME:
1458  *   In journalled data mode, a data buffer may be metadata against the
1459  *   current transaction.  But the same file is part of a shared mapping
1460  *   and someone does a writepage() on it.
1461  *
1462  *   We will move the buffer onto the async_data list, but *after* it has
1463  *   been dirtied. So there's a small window where we have dirty data on
1464  *   BJ_Metadata.
1465  *
1466  *   Note that this only applies to the last partial page in the file: the
1467  *   part which block_write_full_page() uses prepare/commit for.  (That's
1468  *   broken code anyway: it's wrong for msync().)
1469  *
1470  *   It's a rare case: affects the final partial page, for journalled data
1471  *   where the file is subject to both write() and writepage() in the same
1472  *   transaction.  To fix it we'll need a custom block_write_full_page().
1473  *   We'll probably need that anyway for journalling writepage() output.
1474  *
1475  * We don't honour synchronous mounts for writepage().  That would be
1476  * disastrous.  Any write() or metadata operation will sync the fs for
1477  * us.
1478  *
1479  * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
1480  * we don't need to open a transaction here.
1481  */
1482 static int ext4_ordered_writepage(struct page *page,
1483                                 struct writeback_control *wbc)
1484 {
1485         struct inode *inode = page->mapping->host;
1486         struct buffer_head *page_bufs;
1487         handle_t *handle = NULL;
1488         int ret = 0;
1489         int err;
1490
1491         J_ASSERT(PageLocked(page));
1492
1493         /*
1494          * We give up here if we're reentered, because it might be for a
1495          * different filesystem.
1496          */
1497         if (ext4_journal_current_handle())
1498                 goto out_fail;
1499
1500         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1501
1502         if (IS_ERR(handle)) {
1503                 ret = PTR_ERR(handle);
1504                 goto out_fail;
1505         }
1506
1507         if (!page_has_buffers(page)) {
1508                 create_empty_buffers(page, inode->i_sb->s_blocksize,
1509                                 (1 << BH_Dirty)|(1 << BH_Uptodate));
1510         }
1511         page_bufs = page_buffers(page);
1512         walk_page_buffers(handle, page_bufs, 0,
1513                         PAGE_CACHE_SIZE, NULL, bget_one);
1514
1515         ret = block_write_full_page(page, ext4_get_block, wbc);
1516
1517         /*
1518          * The page can become unlocked at any point now, and
1519          * truncate can then come in and change things.  So we
1520          * can't touch *page from now on.  But *page_bufs is
1521          * safe due to elevated refcount.
1522          */
1523
1524         /*
1525          * And attach them to the current transaction.  But only if
1526          * block_write_full_page() succeeded.  Otherwise they are unmapped,
1527          * and generally junk.
1528          */
1529         if (ret == 0) {
1530                 err = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
1531                                         NULL, jbd2_journal_dirty_data_fn);
1532                 ret = err;      /* ret is known to be zero here */
1534         }
1535         walk_page_buffers(handle, page_bufs, 0,
1536                         PAGE_CACHE_SIZE, NULL, bput_one);
1537         err = ext4_journal_stop(handle);
1538         if (!ret)
1539                 ret = err;
1540         return ret;
1541
1542 out_fail:
1543         redirty_page_for_writepage(wbc, page);
1544         unlock_page(page);
1545         return ret;
1546 }
1547
1548 static int ext4_writeback_writepage(struct page *page,
1549                                 struct writeback_control *wbc)
1550 {
1551         struct inode *inode = page->mapping->host;
1552         handle_t *handle = NULL;
1553         int ret = 0;
1554         int err;
1555
1556         if (ext4_journal_current_handle())
1557                 goto out_fail;
1558
1559         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1560         if (IS_ERR(handle)) {
1561                 ret = PTR_ERR(handle);
1562                 goto out_fail;
1563         }
1564
1565         if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
1566                 ret = nobh_writepage(page, ext4_get_block, wbc);
1567         else
1568                 ret = block_write_full_page(page, ext4_get_block, wbc);
1569
1570         err = ext4_journal_stop(handle);
1571         if (!ret)
1572                 ret = err;
1573         return ret;
1574
1575 out_fail:
1576         redirty_page_for_writepage(wbc, page);
1577         unlock_page(page);
1578         return ret;
1579 }
1580
1581 static int ext4_journalled_writepage(struct page *page,
1582                                 struct writeback_control *wbc)
1583 {
1584         struct inode *inode = page->mapping->host;
1585         handle_t *handle = NULL;
1586         int ret = 0;
1587         int err;
1588
1589         if (ext4_journal_current_handle())
1590                 goto no_write;
1591
1592         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1593         if (IS_ERR(handle)) {
1594                 ret = PTR_ERR(handle);
1595                 goto no_write;
1596         }
1597
1598         if (!page_has_buffers(page) || PageChecked(page)) {
1599                 /*
1600                  * It's mmapped pagecache.  Add buffers and journal it.  There
1601                  * doesn't seem much point in redirtying the page here.
1602                  */
1603                 ClearPageChecked(page);
1604                 ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
1605                                         ext4_get_block);
1606                 if (ret != 0) {
1607                         ext4_journal_stop(handle);
1608                         goto out_unlock;
1609                 }
1610                 ret = walk_page_buffers(handle, page_buffers(page), 0,
1611                         PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1612
1613                 err = walk_page_buffers(handle, page_buffers(page), 0,
1614                                 PAGE_CACHE_SIZE, NULL, write_end_fn);
1615                 if (ret == 0)
1616                         ret = err;
1617                 EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1618                 unlock_page(page);
1619         } else {
1620                 /*
1621                  * It may be a page full of checkpoint-mode buffers.  We don't
1622                  * really know unless we go poke around in the buffer_heads.
1623                  * But block_write_full_page will do the right thing.
1624                  */
1625                 ret = block_write_full_page(page, ext4_get_block, wbc);
1626         }
1627         err = ext4_journal_stop(handle);
1628         if (!ret)
1629                 ret = err;
1630 out:
1631         return ret;
1632
1633 no_write:
1634         redirty_page_for_writepage(wbc, page);
1635 out_unlock:
1636         unlock_page(page);
1637         goto out;
1638 }
1639
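     /*
      * The read paths are plain mpage I/O: ext4_get_block() supplies the
      * on-disk block mapping.
      */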
1640 static int ext4_readpage(struct file *file, struct page *page)
1641 {
1642         return mpage_readpage(page, ext4_get_block);
1643 }
1644
1645 static int
1646 ext4_readpages(struct file *file, struct address_space *mapping,
1647                 struct list_head *pages, unsigned nr_pages)
1648 {
1649         return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
1650 }
1651
1652 static void ext4_invalidatepage(struct page *page, unsigned long offset)
1653 {
1654         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1655
1656         /*
1657          * If it's a full truncate we just forget about the pending dirtying
1658          */
1659         if (offset == 0)
1660                 ClearPageChecked(page);
1661
1662         jbd2_journal_invalidatepage(journal, page, offset);
1663 }
1664
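     /*
      * Let jbd2 decide whether the page's buffers can be freed: buffers
      * which are still attached to a transaction must stay around.
      */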
1665 static int ext4_releasepage(struct page *page, gfp_t wait)
1666 {
1667         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1668
1669         WARN_ON(PageChecked(page));
1670         if (!page_has_buffers(page))
1671                 return 0;
1672         return jbd2_journal_try_to_free_buffers(journal, page, wait);
1673 }
1674
1675 /*
1676  * If the O_DIRECT write will extend the file then add this inode to the
1677  * orphan list.  So recovery will truncate it back to the original size
1678  * if the machine crashes during the write.
1679  *
1680  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1681  * crashes then stale disk data _may_ be exposed inside the file.
1682  */
1683 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1684                         const struct iovec *iov, loff_t offset,
1685                         unsigned long nr_segs)
1686 {
1687         struct file *file = iocb->ki_filp;
1688         struct inode *inode = file->f_mapping->host;
1689         struct ext4_inode_info *ei = EXT4_I(inode);
1690         handle_t *handle = NULL;
1691         ssize_t ret;
1692         int orphan = 0;
1693         size_t count = iov_length(iov, nr_segs);
1694
1695         if (rw == WRITE) {
1696                 loff_t final_size = offset + count;
1697
1698                 handle = ext4_journal_start(inode, DIO_CREDITS);
1699                 if (IS_ERR(handle)) {
1700                         ret = PTR_ERR(handle);
1701                         goto out;
1702                 }
1703                 if (final_size > inode->i_size) {
1704                         ret = ext4_orphan_add(handle, inode);
1705                         if (ret)
1706                                 goto out_stop;
1707                         orphan = 1;
1708                         ei->i_disksize = inode->i_size;
1709                 }
1710         }
1711
1712         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1713                                  offset, nr_segs,
1714                                  ext4_get_block, NULL);
1715
1716         /*
1717          * Reacquire the handle: ext4_get_block() can restart the transaction
1718          */
1719         handle = ext4_journal_current_handle();
1720
1721 out_stop:
1722         if (handle) {
1723                 int err;
1724
1725                 if (orphan && inode->i_nlink)
1726                         ext4_orphan_del(handle, inode);
1727                 if (orphan && ret > 0) {
1728                         loff_t end = offset + ret;
1729                         if (end > inode->i_size) {
1730                                 ei->i_disksize = end;
1731                                 i_size_write(inode, end);
1732                                 /*
1733                                  * We're going to return a positive `ret'
1734                                  * here due to non-zero-length I/O, so there's
1735                                  * no way of reporting error returns from
1736                                  * ext4_mark_inode_dirty() to userspace.  So
1737                                  * ignore it.
1738                                  */
1739                                 ext4_mark_inode_dirty(handle, inode);
1740                         }
1741                 }
1742                 err = ext4_journal_stop(handle);
1743                 if (ret == 0)
1744                         ret = err;
1745         }
1746 out:
1747         return ret;
1748 }
1749
1750 /*
1751  * Pages can be marked dirty completely asynchronously from ext4's journalling
1752  * activity, by filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1753  * much here because ->set_page_dirty is called under VFS locks.  The page is
1754  * not necessarily locked.
1755  *
1756  * We cannot just dirty the page and leave attached buffers clean, because the
1757  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1758  * or jbddirty because all the journalling code will explode.
1759  *
1760  * So what we do is to mark the page "pending dirty" and next time writepage
1761  * is called, propagate that into the buffers appropriately.
1762  */
1763 static int ext4_journalled_set_page_dirty(struct page *page)
1764 {
1765         SetPageChecked(page);
1766         return __set_page_dirty_nobuffers(page);
1767 }
1768
1769 static const struct address_space_operations ext4_ordered_aops = {
1770         .readpage       = ext4_readpage,
1771         .readpages      = ext4_readpages,
1772         .writepage      = ext4_ordered_writepage,
1773         .sync_page      = block_sync_page,
1774         .write_begin    = ext4_write_begin,
1775         .write_end      = ext4_ordered_write_end,
1776         .bmap           = ext4_bmap,
1777         .invalidatepage = ext4_invalidatepage,
1778         .releasepage    = ext4_releasepage,
1779         .direct_IO      = ext4_direct_IO,
1780         .migratepage    = buffer_migrate_page,
1781 };
1782
1783 static const struct address_space_operations ext4_writeback_aops = {
1784         .readpage       = ext4_readpage,
1785         .readpages      = ext4_readpages,
1786         .writepage      = ext4_writeback_writepage,
1787         .sync_page      = block_sync_page,
1788         .write_begin    = ext4_write_begin,
1789         .write_end      = ext4_writeback_write_end,
1790         .bmap           = ext4_bmap,
1791         .invalidatepage = ext4_invalidatepage,
1792         .releasepage    = ext4_releasepage,
1793         .direct_IO      = ext4_direct_IO,
1794         .migratepage    = buffer_migrate_page,
1795 };
1796
1797 static const struct address_space_operations ext4_journalled_aops = {
1798         .readpage       = ext4_readpage,
1799         .readpages      = ext4_readpages,
1800         .writepage      = ext4_journalled_writepage,
1801         .sync_page      = block_sync_page,
1802         .write_begin    = ext4_write_begin,
1803         .write_end      = ext4_journalled_write_end,
1804         .set_page_dirty = ext4_journalled_set_page_dirty,
1805         .bmap           = ext4_bmap,
1806         .invalidatepage = ext4_invalidatepage,
1807         .releasepage    = ext4_releasepage,
1808 };
1809
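     /*
      * Install the address_space operations matching the inode's data
      * journalling mode: ordered, writeback or full data journalling.
      */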
1810 void ext4_set_aops(struct inode *inode)
1811 {
1812         if (ext4_should_order_data(inode))
1813                 inode->i_mapping->a_ops = &ext4_ordered_aops;
1814         else if (ext4_should_writeback_data(inode))
1815                 inode->i_mapping->a_ops = &ext4_writeback_aops;
1816         else
1817                 inode->i_mapping->a_ops = &ext4_journalled_aops;
1818 }
1819
1820 /*
1821  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
1822  * up to the end of the block which corresponds to `from'.
1823  * This is required during truncate.  We need to physically zero the tail end
1824  * of that block so it doesn't yield old data if the file is later grown.
1825  */
1826 int ext4_block_truncate_page(handle_t *handle, struct page *page,
1827                 struct address_space *mapping, loff_t from)
1828 {
1829         ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1830         unsigned offset = from & (PAGE_CACHE_SIZE-1);
1831         unsigned blocksize, iblock, length, pos;
1832         struct inode *inode = mapping->host;
1833         struct buffer_head *bh;
1834         int err = 0;
1835
1836         blocksize = inode->i_sb->s_blocksize;
1837         length = blocksize - (offset & (blocksize - 1));
1838         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1839
1840         /*
1841          * For "nobh" option,  we can only work if we don't need to
1842          * read-in the page - otherwise we create buffers to do the IO.
1843          */
1844         if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1845              ext4_should_writeback_data(inode) && PageUptodate(page)) {
1846                 zero_user_page(page, offset, length, KM_USER0);
1847                 set_page_dirty(page);
1848                 goto unlock;
1849         }
1850
1851         if (!page_has_buffers(page))
1852                 create_empty_buffers(page, blocksize, 0);
1853
1854         /* Find the buffer that contains "offset" */
1855         bh = page_buffers(page);
1856         pos = blocksize;
1857         while (offset >= pos) {
1858                 bh = bh->b_this_page;
1859                 iblock++;
1860                 pos += blocksize;
1861         }
1862
1863         err = 0;
1864         if (buffer_freed(bh)) {
1865                 BUFFER_TRACE(bh, "freed: skip");
1866                 goto unlock;
1867         }
1868
1869         if (!buffer_mapped(bh)) {
1870                 BUFFER_TRACE(bh, "unmapped");
1871                 ext4_get_block(inode, iblock, bh, 0);
1872                 /* unmapped? It's a hole - nothing to do */
1873                 if (!buffer_mapped(bh)) {
1874                         BUFFER_TRACE(bh, "still unmapped");
1875                         goto unlock;
1876                 }
1877         }
1878
1879         /* Ok, it's mapped. Make sure it's up-to-date */
1880         if (PageUptodate(page))
1881                 set_buffer_uptodate(bh);
1882
1883         if (!buffer_uptodate(bh)) {
1884                 err = -EIO;
1885                 ll_rw_block(READ, 1, &bh);
1886                 wait_on_buffer(bh);
1887                 /* Uhhuh. Read error. Complain and punt. */
1888                 if (!buffer_uptodate(bh))
1889                         goto unlock;
1890         }
1891
1892         if (ext4_should_journal_data(inode)) {
1893                 BUFFER_TRACE(bh, "get write access");
1894                 err = ext4_journal_get_write_access(handle, bh);
1895                 if (err)
1896                         goto unlock;
1897         }
1898
1899         zero_user_page(page, offset, length, KM_USER0);
1900
1901         BUFFER_TRACE(bh, "zeroed end of block");
1902
1903         err = 0;
1904         if (ext4_should_journal_data(inode)) {
1905                 err = ext4_journal_dirty_metadata(handle, bh);
1906         } else {
1907                 if (ext4_should_order_data(inode))
1908                         err = ext4_journal_dirty_data(handle, bh);
1909                 mark_buffer_dirty(bh);
1910         }
1911
1912 unlock:
1913         unlock_page(page);
1914         page_cache_release(page);
1915         return err;
1916 }
1917
1918 /*
1919  * Probably it should be a library function... search for first non-zero word
1920  * or memcmp with zero_page, whatever is better for particular architecture.
1921  * Linus?
1922  */
1923 static inline int all_zeroes(__le32 *p, __le32 *q)
1924 {
1925         while (p < q)
1926                 if (*p++)
1927                         return 0;
1928         return 1;
1929 }
1930
1931 /**
1932  *      ext4_find_shared - find the indirect blocks for partial truncation.
1933  *      @inode:   inode in question
1934  *      @depth:   depth of the affected branch
1935  *      @offsets: offsets of pointers in that branch (see ext4_block_to_path)
1936  *      @chain:   place to store the pointers to partial indirect blocks
1937  *      @top:     place to the (detached) top of branch
1938  *
1939  *      This is a helper function used by ext4_truncate().
1940  *
1941  *      When we do truncate() we may have to clean the ends of several
1942  *      indirect blocks but leave the blocks themselves alive. Block is
1943  *      partially truncated if some data below the new i_size is referred to
1944  *      from it (and it is on the path to the first completely truncated
1945  *      data block, indeed).  We have to free the top of that path along
1946  *      with everything to the right of the path. Since no allocation
1947  *      past the truncation point is possible until ext4_truncate()
1948  *      finishes, we may safely do the latter, but top of branch may
1949  *      require special attention - pageout below the truncation point
1950  *      might try to populate it.
1951  *
1952  *      We atomically detach the top of branch from the tree, store the
1953  *      block number of its root in *@top, pointers to buffer_heads of
1954  *      partially truncated blocks - in @chain[].bh and pointers to
1955  *      their last elements that should not be removed - in
1956  *      @chain[].p. Return value is the pointer to last filled element
1957  *      of @chain.
1958  *
1959  *      It is left to the caller to do the actual freeing of subtrees:
1960  *              a) free the subtree starting from *@top
1961  *              b) free the subtrees whose roots are stored in
1962  *                      (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1963  *              c) free the subtrees growing from the inode past the @chain[0].
1964  *                      (no partially truncated stuff there).  */
1965
1966 static Indirect *ext4_find_shared(struct inode *inode, int depth,
1967                         int offsets[4], Indirect chain[4], __le32 *top)
1968 {
1969         Indirect *partial, *p;
1970         int k, err;
1971
1972         *top = 0;
1973         /* Make k index the deepest non-null offset + 1 */
1974         for (k = depth; k > 1 && !offsets[k-1]; k--)
1975                 ;
1976         partial = ext4_get_branch(inode, k, offsets, chain, &err);
1977         /* Writer: pointers */
1978         if (!partial)
1979                 partial = chain + k-1;
1980         /*
1981          * If the branch acquired continuation since we've looked at it -
1982          * fine, it should all survive and (new) top doesn't belong to us.
1983          */
1984         if (!partial->key && *partial->p)
1985                 /* Writer: end */
1986                 goto no_top;
1987         for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
1988                 ;
1989         /*
1990          * OK, we've found the last block that must survive. The rest of our
1991          * branch should be detached before unlocking. However, if that rest
1992          * of branch is all ours and does not grow immediately from the inode
1993          * it's easier to cheat and just decrement partial->p.
1994          */
1995         if (p == chain + k - 1 && p > chain) {
1996                 p->p--;
1997         } else {
1998                 *top = *p->p;
1999                 /* Nope, don't do this in ext4.  Must leave the tree intact */
2000 #if 0
2001                 *p->p = 0;
2002 #endif
2003         }
2004         /* Writer: end */
2005
2006         while (partial > p) {
2007                 brelse(partial->bh);
2008                 partial--;
2009         }
2010 no_top:
2011         return partial;
2012 }
2013
2014 /*
2015  * Zero a number of block pointers in either an inode or an indirect block.
2016  * If we restart the transaction we must again get write access to the
2017  * indirect block for further modification.
2018  *
2019  * We release `count' blocks on disk, but (last - first) may be greater
2020  * than `count' because there can be holes in there.
2021  */
2022 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
2023                 struct buffer_head *bh, ext4_fsblk_t block_to_free,
2024                 unsigned long count, __le32 *first, __le32 *last)
2025 {
2026         __le32 *p;
2027         if (try_to_extend_transaction(handle, inode)) {
2028                 if (bh) {
2029                         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2030                         ext4_journal_dirty_metadata(handle, bh);
2031                 }
2032                 ext4_mark_inode_dirty(handle, inode);
2033                 ext4_journal_test_restart(handle, inode);
2034                 if (bh) {
2035                         BUFFER_TRACE(bh, "retaking write access");
2036                         ext4_journal_get_write_access(handle, bh);
2037                 }
2038         }
2039
2040         /*
2041          * Any buffers which are on the journal will be in memory. We find
2042  * them on the hash table so jbd2_journal_revoke() will run
2043  * jbd2_journal_forget() on them.  We've already detached each block
2044  * from the file, so bforget() in jbd2_journal_forget() should be safe.
2045          *
2046          * AKPM: turn on bforget in jbd2_journal_forget()!!!
2047          */
2048         for (p = first; p < last; p++) {
2049                 u32 nr = le32_to_cpu(*p);
2050                 if (nr) {
2051                         struct buffer_head *bh;
2052
2053                         *p = 0;
2054                         bh = sb_find_get_block(inode->i_sb, nr);
2055                         ext4_forget(handle, 0, inode, bh, nr);
2056                 }
2057         }
2058
2059         ext4_free_blocks(handle, inode, block_to_free, count);
2060 }
2061
2062 /**
2063  * ext4_free_data - free a list of data blocks
2064  * @handle:     handle for this transaction
2065  * @inode:      inode we are dealing with
2066  * @this_bh:    indirect buffer_head which contains *@first and *@last
2067  * @first:      array of block numbers
2068  * @last:       points immediately past the end of array
2069  *
2070  * We are freeing all blocks referred to from that array (numbers are stored as
2071  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2072  *
2073  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2074  * blocks are contiguous then releasing them at one time will only affect one
2075  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2076  * actually use a lot of journal space.
2077  *
2078  * @this_bh will be %NULL if @first and @last point into the inode's direct
2079  * block pointers.
2080  */
2081 static void ext4_free_data(handle_t *handle, struct inode *inode,
2082                            struct buffer_head *this_bh,
2083                            __le32 *first, __le32 *last)
2084 {
2085         ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2086         unsigned long count = 0;            /* Number of blocks in the run */
2087         __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
2088                                                corresponding to
2089                                                block_to_free */
2090         ext4_fsblk_t nr;                    /* Current block # */
2091         __le32 *p;                          /* Pointer into inode/ind
2092                                                for current block */
2093         int err;
2094
2095         if (this_bh) {                          /* For indirect block */
2096                 BUFFER_TRACE(this_bh, "get_write_access");
2097                 err = ext4_journal_get_write_access(handle, this_bh);
2098                 /* Important: if we can't update the indirect pointers
2099                  * to the blocks, we can't free them. */
2100                 if (err)
2101                         return;
2102         }
2103
2104         for (p = first; p < last; p++) {
2105                 nr = le32_to_cpu(*p);
2106                 if (nr) {
2107                         /* accumulate blocks to free if they're contiguous */
2108                         if (count == 0) {
2109                                 block_to_free = nr;
2110                                 block_to_free_p = p;
2111                                 count = 1;
2112                         } else if (nr == block_to_free + count) {
2113                                 count++;
2114                         } else {
2115                                 ext4_clear_blocks(handle, inode, this_bh,
2116                                                   block_to_free,
2117                                                   count, block_to_free_p, p);
2118                                 block_to_free = nr;
2119                                 block_to_free_p = p;
2120                                 count = 1;
2121                         }
2122                 }
2123         }
2124
2125         if (count > 0)
2126                 ext4_clear_blocks(handle, inode, this_bh, block_to_free,
2127                                   count, block_to_free_p, p);
2128
2129         if (this_bh) {
2130                 BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
2131                 ext4_journal_dirty_metadata(handle, this_bh);
2132         }
2133 }
2134
2135 /**
2136  *      ext4_free_branches - free an array of branches
2137  *      @handle: JBD handle for this transaction
2138  *      @inode: inode we are dealing with
2139  *      @parent_bh: the buffer_head which contains *@first and *@last
2140  *      @first: array of block numbers
2141  *      @last:  pointer immediately past the end of array
2142  *      @depth: depth of the branches to free
2143  *
2144  *      We are freeing all blocks referred to from these branches (numbers are
2145  *      stored as little-endian 32-bit) and updating @inode->i_blocks
2146  *      appropriately.
2147  */
2148 static void ext4_free_branches(handle_t *handle, struct inode *inode,
2149                                struct buffer_head *parent_bh,
2150                                __le32 *first, __le32 *last, int depth)
2151 {
2152         ext4_fsblk_t nr;
2153         __le32 *p;
2154
2155         if (is_handle_aborted(handle))
2156                 return;
2157
2158         if (depth--) {
2159                 struct buffer_head *bh;
2160                 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2161                 p = last;
2162                 while (--p >= first) {
2163                         nr = le32_to_cpu(*p);
2164                         if (!nr)
2165                                 continue;               /* A hole */
2166
2167                         /* Go read the buffer for the next level down */
2168                         bh = sb_bread(inode->i_sb, nr);
2169
2170                         /*
2171                          * A read failure? Report error and clear slot
2172                          * (should be rare).
2173                          */
2174                         if (!bh) {
2175                                 ext4_error(inode->i_sb, "ext4_free_branches",
2176                                            "Read failure, inode=%lu, block=%llu",
2177                                            inode->i_ino, nr);
2178                                 continue;
2179                         }
2180
2181                         /* This zaps the entire block.  Bottom up. */
2182                         BUFFER_TRACE(bh, "free child branches");
2183                         ext4_free_branches(handle, inode, bh,
2184                                            (__le32*)bh->b_data,
2185                                            (__le32*)bh->b_data + addr_per_block,
2186                                            depth);
2187
2188                         /*
2189                          * We've probably journalled the indirect block several
2190                          * times during the truncate.  But it's no longer
2191                          * needed and we now drop it from the transaction via
2192                          * jbd2_journal_revoke().
2193                          *
2194                          * That's easy if it's exclusively part of this
2195                          * transaction.  But if it's part of the committing
2196                          * transaction then jbd2_journal_forget() will simply
2197                          * brelse() it.  That means that if the underlying
2198                          * block is reallocated in ext4_get_block(),
2199                          * unmap_underlying_metadata() will find this block
2200                          * and will try to get rid of it.  damn, damn.
2201                          *
2202                          * If this block has already been committed to the
2203                          * journal, a revoke record will be written.  And
2204                          * revoke records must be emitted *before* clearing
2205                          * this block's bit in the bitmaps.
2206                          */
2207                         ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
2208
2209                         /*
2210                          * Everything below this pointer has been
2211                          * released.  Now let this top-of-subtree go.
2212                          *
2213                          * We want the freeing of this indirect block to be
2214                          * atomic in the journal with the updating of the
2215                          * bitmap block which owns it.  So make some room in
2216                          * the journal.
2217                          *
2218                          * We zero the parent pointer *after* freeing its
2219                          * pointee in the bitmaps, so if extend_transaction()
2220                          * for some reason fails to put the bitmap changes and
2221                          * the release into the same transaction, recovery
2222                          * will merely complain about releasing a free block,
2223                          * rather than leaking blocks.
2224                          */
2225                         if (is_handle_aborted(handle))
2226                                 return;
2227                         if (try_to_extend_transaction(handle, inode)) {
2228                                 ext4_mark_inode_dirty(handle, inode);
2229                                 ext4_journal_test_restart(handle, inode);
2230                         }
2231
2232                         ext4_free_blocks(handle, inode, nr, 1);
2233
2234                         if (parent_bh) {
2235                                 /*
2236                                  * The block which we have just freed is
2237                                  * pointed to by an indirect block: journal it
2238                                  */
2239                                 BUFFER_TRACE(parent_bh, "get_write_access");
2240                                 if (!ext4_journal_get_write_access(handle,
2241                                                                    parent_bh)){
2242                                         *p = 0;
2243                                         BUFFER_TRACE(parent_bh,
2244                                         "call ext4_journal_dirty_metadata");
2245                                         ext4_journal_dirty_metadata(handle,
2246                                                                     parent_bh);
2247                                 }
2248                         }
2249                 }
2250         } else {
2251                 /* We have reached the bottom of the tree. */
2252                 BUFFER_TRACE(parent_bh, "free data blocks");
2253                 ext4_free_data(handle, inode, parent_bh, first, last);
2254         }
2255 }
2256
2257 /*
2258  * ext4_truncate()
2259  *
2260  * We block out ext4_get_block() block instantiations across the entire
2261  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
2262  * simultaneously on behalf of the same inode.
2263  *
2264  * As we work through the truncate and commit bits of it to the journal there
2265  * is one core, guiding principle: the file's tree must always be consistent on
2266  * disk.  We must be able to restart the truncate after a crash.
2267  *
2268  * The file's tree may be transiently inconsistent in memory (although it
2269  * probably isn't), but whenever we close off and commit a journal transaction,
2270  * the contents of (the filesystem + the journal) must be consistent and
2271  * restartable.  It's pretty simple, really: bottom up, right to left (although
2272  * left-to-right works OK too).
2273  *
2274  * Note that at recovery time, journal replay occurs *before* the restart of
2275  * truncate against the orphan inode list.
2276  *
2277  * The committed inode has the new, desired i_size (which is the same as
2278  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
2279  * that this inode's truncate did not complete and it will again call
2280  * ext4_truncate() to have another go.  So there will be instantiated blocks
2281  * to the right of the truncation point in a crashed ext4 filesystem.  But
2282  * that's fine - as long as they are linked from the inode, the post-crash
2283  * ext4_truncate() run will find them and release them.
2284  */
2285 void ext4_truncate(struct inode *inode)
2286 {
2287         handle_t *handle;
2288         struct ext4_inode_info *ei = EXT4_I(inode);
2289         __le32 *i_data = ei->i_data;
2290         int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2291         struct address_space *mapping = inode->i_mapping;
2292         int offsets[4];
2293         Indirect chain[4];
2294         Indirect *partial;
2295         __le32 nr = 0;
2296         int n;
2297         long last_block;
2298         unsigned blocksize = inode->i_sb->s_blocksize;
2299         struct page *page;
2300
2301         if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
2302             S_ISLNK(inode->i_mode)))
2303                 return;
2304         if (ext4_inode_is_fast_symlink(inode))
2305                 return;
2306         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2307                 return;
2308
2309         /*
2310          * We have to lock the EOF page here, because lock_page() nests
2311          * outside jbd2_journal_start().
2312          */
2313         if ((inode->i_size & (blocksize - 1)) == 0) {
2314                 /* Block boundary? Nothing to do */
2315                 page = NULL;
2316         } else {
2317                 page = grab_cache_page(mapping,
2318                                 inode->i_size >> PAGE_CACHE_SHIFT);
2319                 if (!page)
2320                         return;
2321         }
2322
2323         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
2324                 return ext4_ext_truncate(inode, page);
2325
2326         handle = start_transaction(inode);
2327         if (IS_ERR(handle)) {
2328                 if (page) {
2329                         clear_highpage(page);
2330                         flush_dcache_page(page);
2331                         unlock_page(page);
2332                         page_cache_release(page);
2333                 }
2334                 return;         /* AKPM: return what? */
2335         }
2336
2337         last_block = (inode->i_size + blocksize - 1)
2338                                         >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
2339
2340         if (page)
2341                 ext4_block_truncate_page(handle, page, mapping, inode->i_size);
2342
2343         n = ext4_block_to_path(inode, last_block, offsets, NULL);
2344         if (n == 0)
2345                 goto out_stop;  /* error */
2346
2347         /*
2348          * OK.  This truncate is going to happen.  We add the inode to the
2349          * orphan list, so that if this truncate spans multiple transactions,
2350          * and we crash, we will resume the truncate when the filesystem
2351          * recovers.  It also marks the inode dirty, to catch the new size.
2352          *
2353          * Implication: the file must always be in a sane, consistent
2354          * truncatable state while each transaction commits.
2355          */
2356         if (ext4_orphan_add(handle, inode))
2357                 goto out_stop;
2358
2359         /*
2360          * The orphan list entry will now protect us from any crash which
2361          * occurs before the truncate completes, so it is now safe to propagate
2362          * the new, shorter inode size (held for now in i_size) into the
2363          * on-disk inode. We do this via i_disksize, which is the value which
2364          * ext4 *really* writes onto the disk inode.
2365          */
2366         ei->i_disksize = inode->i_size;
2367
2368         /*
2369          * From here we block out all ext4_get_block() callers who want to
2370          * modify the block allocation tree.
2371          */
2372         mutex_lock(&ei->truncate_mutex);
2373
2374         if (n == 1) {           /* direct blocks */
2375                 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
2376                                i_data + EXT4_NDIR_BLOCKS);
2377                 goto do_indirects;
2378         }
2379
2380         partial = ext4_find_shared(inode, n, offsets, chain, &nr);
2381         /* Kill the top of shared branch (not detached) */
2382         if (nr) {
2383                 if (partial == chain) {
2384                         /* Shared branch grows from the inode */
2385                         ext4_free_branches(handle, inode, NULL,
2386                                            &nr, &nr+1, (chain+n-1) - partial);
2387                         *partial->p = 0;
2388                         /*
2389                          * We mark the inode dirty prior to restart,
2390                          * and prior to stop.  No need for it here.
2391                          */
2392                 } else {
2393                         /* Shared branch grows from an indirect block */
2394                         BUFFER_TRACE(partial->bh, "get_write_access");
2395                         ext4_free_branches(handle, inode, partial->bh,
2396                                         partial->p,
2397                                         partial->p+1, (chain+n-1) - partial);
2398                 }
2399         }
2400         /* Clear the ends of indirect blocks on the shared branch */
2401         while (partial > chain) {
2402                 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
2403                                    (__le32*)partial->bh->b_data+addr_per_block,
2404                                    (chain+n-1) - partial);
2405                 BUFFER_TRACE(partial->bh, "call brelse");
2406                 brelse(partial->bh);
2407                 partial--;
2408         }
2409 do_indirects:
2410         /* Kill the remaining (whole) subtrees */
2411         switch (offsets[0]) {
2412         default:
2413                 nr = i_data[EXT4_IND_BLOCK];
2414                 if (nr) {
2415                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2416                         i_data[EXT4_IND_BLOCK] = 0;
2417                 }
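             /* fall through */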
2418         case EXT4_IND_BLOCK:
2419                 nr = i_data[EXT4_DIND_BLOCK];
2420                 if (nr) {
2421                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2422                         i_data[EXT4_DIND_BLOCK] = 0;
2423                 }
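             /* fall through */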
2424         case EXT4_DIND_BLOCK:
2425                 nr = i_data[EXT4_TIND_BLOCK];
2426                 if (nr) {
2427                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2428                         i_data[EXT4_TIND_BLOCK] = 0;
2429                 }
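             /* fall through */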
2430         case EXT4_TIND_BLOCK:
2431                 ;
2432         }
2433
2434         ext4_discard_reservation(inode);
2435
2436         mutex_unlock(&ei->truncate_mutex);
2437         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2438         ext4_mark_inode_dirty(handle, inode);
2439
2440         /*
2441          * In a multi-transaction truncate, we only make the final transaction
2442          * synchronous
2443          */
2444         if (IS_SYNC(inode))
2445                 handle->h_sync = 1;
2446 out_stop:
2447         /*
2448          * If this was a simple ftruncate(), and the file will remain alive
2449          * then we need to clear up the orphan record which we created above.
2450          * However, if this was a real unlink then we were called by
2451          * ext4_delete_inode(), and we allow that function to clean up the
2452          * orphan info for us.
2453          */
2454         if (inode->i_nlink)
2455                 ext4_orphan_del(handle, inode);
2456
2457         ext4_journal_stop(handle);
2458 }
2459
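     /*
      * Map inode number @ino to the filesystem block which holds its
      * on-disk struct ext4_inode, and fill in @iloc with the block group
      * and the inode's byte offset within that block.  Returns 0 if the
      * inode number is invalid or the group descriptor is missing.
      */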
2460 static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
2461                 unsigned long ino, struct ext4_iloc *iloc)
2462 {
2463         unsigned long desc, group_desc, block_group;
2464         unsigned long offset;
2465         ext4_fsblk_t block;
2466         struct buffer_head *bh;
2467         struct ext4_group_desc *gdp;
2468
2469         if (!ext4_valid_inum(sb, ino)) {
2470                 /*
2471                  * This error is already checked for in namei.c unless we are
2472                  * looking at an NFS filehandle, in which case no error
2473                  * report is needed
2474                  */
2475                 return 0;
2476         }
2477
2478         block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
2479         if (block_group >= EXT4_SB(sb)->s_groups_count) {
2480                 ext4_error(sb, "ext4_get_inode_block", "group >= groups count");
2481                 return 0;
2482         }
2483         smp_rmb();
2484         group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
2485         desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2486         bh = EXT4_SB(sb)->s_group_desc[group_desc];
2487         if (!bh) {
2488                 ext4_error (sb, "ext4_get_inode_block",
2489                             "Descriptor not loaded");
2490                 return 0;
2491         }
2492
2493         gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
2494                 desc * EXT4_DESC_SIZE(sb));
2495         /*
2496          * Figure out the offset within the block group inode table
2497          */
2498         offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
2499                 EXT4_INODE_SIZE(sb);
2500         block = ext4_inode_table(sb, gdp) +
2501                 (offset >> EXT4_BLOCK_SIZE_BITS(sb));
2502
2503         iloc->block_group = block_group;
2504         iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
2505         return block;
2506 }
2507
2508 /*
2509  * ext4_get_inode_loc returns with an extra refcount against the inode's
2510  * underlying buffer_head on success. If 'in_mem' is true, we have all
2511  * data in memory that is needed to recreate the on-disk version of this
2512  * inode.
2513  */
2514 static int __ext4_get_inode_loc(struct inode *inode,
2515                                 struct ext4_iloc *iloc, int in_mem)
2516 {
2517         ext4_fsblk_t block;
2518         struct buffer_head *bh;
2519
2520         block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2521         if (!block)
2522                 return -EIO;
2523
2524         bh = sb_getblk(inode->i_sb, block);
2525         if (!bh) {
2526                 ext4_error (inode->i_sb, "ext4_get_inode_loc",
2527                                 "unable to read inode block - "
2528                                 "inode=%lu, block=%llu",
2529                                  inode->i_ino, block);
2530                 return -EIO;
2531         }
2532         if (!buffer_uptodate(bh)) {
2533                 lock_buffer(bh);
2534                 if (buffer_uptodate(bh)) {
2535                         /* someone brought it uptodate while we waited */
2536                         unlock_buffer(bh);
2537                         goto has_buffer;
2538                 }
2539
2540                 /*
2541                  * If we have all information of the inode in memory and this
2542                  * is the only valid inode in the block, we need not read the
2543                  * block.
2544                  */
2545                 if (in_mem) {
2546                         struct buffer_head *bitmap_bh;
2547                         struct ext4_group_desc *desc;
2548                         int inodes_per_buffer;
2549                         int inode_offset, i;
2550                         int block_group;
2551                         int start;
2552
2553                         block_group = (inode->i_ino - 1) /
2554                                         EXT4_INODES_PER_GROUP(inode->i_sb);
2555                         inodes_per_buffer = bh->b_size /
2556                                 EXT4_INODE_SIZE(inode->i_sb);
2557                         inode_offset = ((inode->i_ino - 1) %
2558                                         EXT4_INODES_PER_GROUP(inode->i_sb));
2559                         start = inode_offset & ~(inodes_per_buffer - 1);
2560
2561                         /* Is the inode bitmap in cache? */
2562                         desc = ext4_get_group_desc(inode->i_sb,
2563                                                 block_group, NULL);
2564                         if (!desc)
2565                                 goto make_io;
2566
2567                         bitmap_bh = sb_getblk(inode->i_sb,
2568                                 ext4_inode_bitmap(inode->i_sb, desc));
2569                         if (!bitmap_bh)
2570                                 goto make_io;
2571
2572                         /*
2573                          * If the inode bitmap isn't in cache then the
2574                          * optimisation may end up performing two reads instead
2575                          * of one, so skip it.
2576                          */
2577                         if (!buffer_uptodate(bitmap_bh)) {
2578                                 brelse(bitmap_bh);
2579                                 goto make_io;
2580                         }
2581                         for (i = start; i < start + inodes_per_buffer; i++) {
2582                                 if (i == inode_offset)
2583                                         continue;
2584                                 if (ext4_test_bit(i, bitmap_bh->b_data))
2585                                         break;
2586                         }
2587                         brelse(bitmap_bh);
2588                         if (i == start + inodes_per_buffer) {
2589                                 /* all other inodes are free, so skip I/O */
2590                                 memset(bh->b_data, 0, bh->b_size);
2591                                 set_buffer_uptodate(bh);
2592                                 unlock_buffer(bh);
2593                                 goto has_buffer;
2594                         }
2595                 }
2596
2597 make_io:
2598                 /*
2599                  * There are other valid inodes in the buffer, this inode
2600                  * has in-inode xattrs, or we don't have this inode in memory.
2601                  * Read the block from disk.
2602                  */
2603                 get_bh(bh);
2604                 bh->b_end_io = end_buffer_read_sync;
2605                 submit_bh(READ_META, bh);
2606                 wait_on_buffer(bh);
2607                 if (!buffer_uptodate(bh)) {
2608                         ext4_error(inode->i_sb, "ext4_get_inode_loc",
2609                                         "unable to read inode block - "
2610                                         "inode=%lu, block=%llu",
2611                                         inode->i_ino, block);
2612                         brelse(bh);
2613                         return -EIO;
2614                 }
2615         }
2616 has_buffer:
2617         iloc->bh = bh;
2618         return 0;
2619 }
2620
2621 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
2622 {
2623         /* We have all inode data except xattrs in memory here. */
2624         return __ext4_get_inode_loc(inode, iloc,
2625                 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
2626 }
2627
2628 void ext4_set_inode_flags(struct inode *inode)
2629 {
2630         unsigned int flags = EXT4_I(inode)->i_flags;
2631
2632         inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2633         if (flags & EXT4_SYNC_FL)
2634                 inode->i_flags |= S_SYNC;
2635         if (flags & EXT4_APPEND_FL)
2636                 inode->i_flags |= S_APPEND;
2637         if (flags & EXT4_IMMUTABLE_FL)
2638                 inode->i_flags |= S_IMMUTABLE;
2639         if (flags & EXT4_NOATIME_FL)
2640                 inode->i_flags |= S_NOATIME;
2641         if (flags & EXT4_DIRSYNC_FL)
2642                 inode->i_flags |= S_DIRSYNC;
2643 }
2644
2645 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
2646 void ext4_get_inode_flags(struct ext4_inode_info *ei)
2647 {
2648         unsigned int flags = ei->vfs_inode.i_flags;
2649
2650         ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
2651                         EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
2652         if (flags & S_SYNC)
2653                 ei->i_flags |= EXT4_SYNC_FL;
2654         if (flags & S_APPEND)
2655                 ei->i_flags |= EXT4_APPEND_FL;
2656         if (flags & S_IMMUTABLE)
2657                 ei->i_flags |= EXT4_IMMUTABLE_FL;
2658         if (flags & S_NOATIME)
2659                 ei->i_flags |= EXT4_NOATIME_FL;
2660         if (flags & S_DIRSYNC)
2661                 ei->i_flags |= EXT4_DIRSYNC_FL;
2662 }
2663
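     /*
      * Read the on-disk inode and set up the in-core VFS inode: this is
      * the ->read_inode() superblock operation, invoked via iget().
      */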
2664 void ext4_read_inode(struct inode * inode)
2665 {
2666         struct ext4_iloc iloc;
2667         struct ext4_inode *raw_inode;
2668         struct ext4_inode_info *ei = EXT4_I(inode);
2669         struct buffer_head *bh;
2670         int block;
2671
2672 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
2673         ei->i_acl = EXT4_ACL_NOT_CACHED;
2674         ei->i_default_acl = EXT4_ACL_NOT_CACHED;
2675 #endif
2676         ei->i_block_alloc_info = NULL;
2677
2678         if (__ext4_get_inode_loc(inode, &iloc, 0))
2679                 goto bad_inode;
2680         bh = iloc.bh;
2681         raw_inode = ext4_raw_inode(&iloc);
2682         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2683         inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2684         inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2685         if (!(test_opt(inode->i_sb, NO_UID32))) {
2686                 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2687                 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2688         }
2689         inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2690         inode->i_size = le32_to_cpu(raw_inode->i_size);
2691
2692         ei->i_state = 0;
2693         ei->i_dir_start_lookup = 0;
2694         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2695         /* We now have enough fields to check whether the inode was
2696          * active or not.  This is needed because nfsd might try to
2697          * access dead inodes; the test is the same one that e2fsck uses.
2698          * NeilBrown 1999oct15
2699          */
2700         if (inode->i_nlink == 0) {
2701                 if (inode->i_mode == 0 ||
2702                     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
2703                         /* this inode is deleted */
2704                         brelse(bh);
2705                         goto bad_inode;
2706                 }
2707                 /* The only unlinked inodes we let through here have
2708                  * valid i_mode and are being read by the orphan
2709                  * recovery code: that's fine, we're about to complete
2710                  * the process of deleting those. */
2711         }
2712         inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2713         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2714         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2715         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2716             cpu_to_le32(EXT4_OS_HURD))
2717                 ei->i_file_acl |=
2718                         ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2719         if (!S_ISREG(inode->i_mode)) {
2720                 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2721         } else {
2722                 inode->i_size |=
2723                         ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2724         }
2725         ei->i_disksize = inode->i_size;
2726         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2727         ei->i_block_group = iloc.block_group;
2728         /*
2729          * NOTE! The in-memory inode i_data array is in little-endian order
2730          * even on big-endian machines: we do NOT byteswap the block numbers!
2731          */
2732         for (block = 0; block < EXT4_N_BLOCKS; block++)
2733                 ei->i_data[block] = raw_inode->i_block[block];
2734         INIT_LIST_HEAD(&ei->i_orphan);
2735
2736         if (inode->i_ino >= EXT4_FIRST_INO(inode->i_sb) + 1 &&
2737             EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2738                 /*
2739                  * When mke2fs creates big inodes it does not zero out
2740                  * the unused bytes above EXT4_GOOD_OLD_INODE_SIZE,
2741                  * so ignore those first few inodes.
2742                  */
2743                 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2744                 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2745                     EXT4_INODE_SIZE(inode->i_sb)) {
2746                         brelse(bh);
2747                         goto bad_inode;
2748                 }
2749                 if (ei->i_extra_isize == 0) {
2750                         /* The extra space is currently unused. Use it. */
2751                         ei->i_extra_isize = sizeof(struct ext4_inode) -
2752                                             EXT4_GOOD_OLD_INODE_SIZE;
2753                 } else {
2754                         __le32 *magic = (void *)raw_inode +
2755                                         EXT4_GOOD_OLD_INODE_SIZE +
2756                                         ei->i_extra_isize;
2757                         if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2758                                  ei->i_state |= EXT4_STATE_XATTR;
2759                 }
2760         } else
2761                 ei->i_extra_isize = 0;
2762
2763         EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
2764         EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
2765         EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
2766         EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
2767
2768         if (S_ISREG(inode->i_mode)) {
2769                 inode->i_op = &ext4_file_inode_operations;
2770                 inode->i_fop = &ext4_file_operations;
2771                 ext4_set_aops(inode);
2772         } else if (S_ISDIR(inode->i_mode)) {
2773                 inode->i_op = &ext4_dir_inode_operations;
2774                 inode->i_fop = &ext4_dir_operations;
2775         } else if (S_ISLNK(inode->i_mode)) {
2776                 if (ext4_inode_is_fast_symlink(inode))
2777                         inode->i_op = &ext4_fast_symlink_inode_operations;
2778                 else {
2779                         inode->i_op = &ext4_symlink_inode_operations;
2780                         ext4_set_aops(inode);
2781                 }
2782         } else {
2783                 inode->i_op = &ext4_special_inode_operations;
2784                 if (raw_inode->i_block[0])
2785                         init_special_inode(inode, inode->i_mode,
2786                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2787                 else
2788                         init_special_inode(inode, inode->i_mode,
2789                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2790         }
2791         brelse(iloc.bh);
2792         ext4_set_inode_flags(inode);
2793         return;
2794
2795 bad_inode:
2796         make_bad_inode(inode);
2797         return;
2798 }
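
/*
 * A note on the i_data copy in ext4_read_inode() above (an
 * illustrative sketch, not extra kernel code): the block numbers are
 * kept little-endian in memory, so readers must convert on access,
 * e.g.
 *
 *      ext4_fsblk_t blk = le32_to_cpu(ei->i_data[0]);
 *
 * rather than dereferencing the array directly.
 */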
2799
2800 /*
2801  * Post the struct inode info into an on-disk inode location in the
2802  * buffer-cache.  This gobbles the caller's reference to the
2803  * buffer_head in the inode location struct.
2804  *
2805  * The caller must have write access to iloc->bh.
2806  */
2807 static int ext4_do_update_inode(handle_t *handle,
2808                                 struct inode *inode,
2809                                 struct ext4_iloc *iloc)
2810 {
2811         struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
2812         struct ext4_inode_info *ei = EXT4_I(inode);
2813         struct buffer_head *bh = iloc->bh;
2814         int err = 0, rc, block;
2815
2816         /* For fields not tracked in the in-memory inode,
2817          * initialise them to zero for new inodes. */
2818         if (ei->i_state & EXT4_STATE_NEW)
2819                 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2820
2821         ext4_get_inode_flags(ei);
2822         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2823         if (!(test_opt(inode->i_sb, NO_UID32))) {
2824                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2825                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2826 /*
2827  * Fix up interoperability with old kernels. Otherwise, old inodes get
2828  * re-used with the upper 16 bits of the uid/gid intact.
2829  */
2830                 if (!ei->i_dtime) {
2831                         raw_inode->i_uid_high =
2832                                 cpu_to_le16(high_16_bits(inode->i_uid));
2833                         raw_inode->i_gid_high =
2834                                 cpu_to_le16(high_16_bits(inode->i_gid));
2835                 } else {
2836                         raw_inode->i_uid_high = 0;
2837                         raw_inode->i_gid_high = 0;
2838                 }
2839         } else {
2840                 raw_inode->i_uid_low =
2841                         cpu_to_le16(fs_high2lowuid(inode->i_uid));
2842                 raw_inode->i_gid_low =
2843                         cpu_to_le16(fs_high2lowgid(inode->i_gid));
2844                 raw_inode->i_uid_high = 0;
2845                 raw_inode->i_gid_high = 0;
2846         }
2847         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2848         raw_inode->i_size = cpu_to_le32(ei->i_disksize);
2849
2850         EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
2851         EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
2852         EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
2853         EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
2854
2855         raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2856         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2857         raw_inode->i_flags = cpu_to_le32(ei->i_flags);
2858         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2859             cpu_to_le32(EXT4_OS_HURD))
2860                 raw_inode->i_file_acl_high =
2861                         cpu_to_le16(ei->i_file_acl >> 32);
2862         raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
2863         if (!S_ISREG(inode->i_mode)) {
2864                 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
2865         } else {
2866                 raw_inode->i_size_high =
2867                         cpu_to_le32(ei->i_disksize >> 32);
2868                 if (ei->i_disksize > 0x7fffffffULL) {
2869                         struct super_block *sb = inode->i_sb;
2870                         if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
2871                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
2872                             EXT4_SB(sb)->s_es->s_rev_level ==
2873                                         cpu_to_le32(EXT4_GOOD_OLD_REV)) {
2874                                /* If this is the first large file
2875                                 * created, add a flag to the superblock.
2876                                 */
2877                                 err = ext4_journal_get_write_access(handle,
2878                                                 EXT4_SB(sb)->s_sbh);
2879                                 if (err)
2880                                         goto out_brelse;
2881                                 ext4_update_dynamic_rev(sb);
2882                                 EXT4_SET_RO_COMPAT_FEATURE(sb,
2883                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
2884                                 sb->s_dirt = 1;
2885                                 handle->h_sync = 1;
2886                                 err = ext4_journal_dirty_metadata(handle,
2887                                                 EXT4_SB(sb)->s_sbh);
2888                         }
2889                 }
2890         }
2891         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2892         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2893                 if (old_valid_dev(inode->i_rdev)) {
2894                         raw_inode->i_block[0] =
2895                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
2896                         raw_inode->i_block[1] = 0;
2897                 } else {
2898                         raw_inode->i_block[0] = 0;
2899                         raw_inode->i_block[1] =
2900                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
2901                         raw_inode->i_block[2] = 0;
2902                 }
2903         } else for (block = 0; block < EXT4_N_BLOCKS; block++)
2904                 raw_inode->i_block[block] = ei->i_data[block];
2905
2906         if (ei->i_extra_isize)
2907                 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
2908
2909         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2910         rc = ext4_journal_dirty_metadata(handle, bh);
2911         if (!err)
2912                 err = rc;
2913         ei->i_state &= ~EXT4_STATE_NEW;
2914
2915 out_brelse:
2916         brelse(bh);
2917         ext4_std_error(inode->i_sb, err);
2918         return err;
2919 }
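
/*
 * For illustration, a sketch of how this update is normally driven
 * (this is the reserve/dirty pairing used by ext4_mark_inode_dirty()
 * below, not additional code):
 *
 *      struct ext4_iloc iloc;
 *      err = ext4_reserve_inode_write(handle, inode, &iloc);
 *      if (!err)
 *              err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *
 * By the time ext4_do_update_inode() runs, write access to iloc->bh
 * has already been taken, so the raw inode can be filled in and the
 * buffer journalled.
 */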
2920
2921 /*
2922  * ext4_write_inode()
2923  *
2924  * We are called from a few places:
2925  *
2926  * - Within generic_file_write() for O_SYNC files.
2927  *   Here, there will be no transaction running. We wait for any running
2928  *   transaction to commit.
2929  *
2930  * - Within sys_sync(), kupdate and such.
2931  *   We wait on commit, if told to.
2932  *
2933  * - Within prune_icache() (PF_MEMALLOC == true)
2934  *   Here we simply return.  We can't afford to block kswapd on the
2935  *   journal commit.
2936  *
2937  * In all cases it is actually safe for us to return without doing anything,
2938  * because the inode has been copied into a raw inode buffer in
2939  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
2940  * knfsd.
2941  *
2942  * Note that we are absolutely dependent upon all inode dirtiers doing the
2943  * right thing: they *must* call mark_inode_dirty() after dirtying info in
2944  * which we are interested.
2945  *
2946  * It would be a bug for them to not do this.  The code:
2947  *
2948  *      mark_inode_dirty(inode)
2949  *      stuff();
2950  *      inode->i_size = expr;
2951  *
2952  * is in error because a kswapd-driven write_inode() could occur while
2953  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
2954  * will no longer be on the superblock's dirty inode list.
2955  */
2956 int ext4_write_inode(struct inode *inode, int wait)
2957 {
2958         if (current->flags & PF_MEMALLOC)
2959                 return 0;
2960
2961         if (ext4_journal_current_handle()) {
2962                 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
2963                 dump_stack();
2964                 return -EIO;
2965         }
2966
2967         if (!wait)
2968                 return 0;
2969
2970         return ext4_force_commit(inode->i_sb);
2971 }
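
/*
 * To make the ordering rule above concrete, the correct form of the
 * buggy sequence quoted in the comment is (an illustrative sketch):
 *
 *      inode->i_size = expr;
 *      stuff();
 *      mark_inode_dirty(inode);
 *
 * i.e. every field we care about is updated before mark_inode_dirty(),
 * so a kswapd-driven write_inode() can never snapshot a stale i_size.
 */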
2972
2973 /*
2974  * ext4_setattr()
2975  *
2976  * Called from notify_change.
2977  *
2978  * We want to trap VFS attempts to truncate the file as soon as
2979  * possible.  In particular, we want to make sure that when the VFS
2980  * shrinks i_size, we put the inode on the orphan list and modify
2981  * i_disksize immediately, so that during the subsequent flushing of
2982  * dirty pages and freeing of disk blocks, we can guarantee that any
2983  * commit will leave the blocks being flushed in an unused state on
2984  * disk.  (On recovery, the inode will get truncated and the blocks will
2985  * be freed, so we have a strong guarantee that no future commit will
2986  * leave these blocks visible to the user.)
2987  *
2988  * Called with inode->sem down.
2989  */
2990 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
2991 {
2992         struct inode *inode = dentry->d_inode;
2993         int error, rc = 0;
2994         const unsigned int ia_valid = attr->ia_valid;
2995
2996         error = inode_change_ok(inode, attr);
2997         if (error)
2998                 return error;
2999
3000         if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3001                 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3002                 handle_t *handle;
3003
3004                 /* (user+group)*(old+new) structure, inode write (sb,
3005                  * inode block, ? - but truncate inode update has it) */
3006                 handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
3007                                         EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3008                 if (IS_ERR(handle)) {
3009                         error = PTR_ERR(handle);
3010                         goto err_out;
3011                 }
3012                 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
3013                 if (error) {
3014                         ext4_journal_stop(handle);
3015                         return error;
3016                 }
3017                 /* Update corresponding info in inode so that everything is in
3018                  * one transaction */
3019                 if (attr->ia_valid & ATTR_UID)
3020                         inode->i_uid = attr->ia_uid;
3021                 if (attr->ia_valid & ATTR_GID)
3022                         inode->i_gid = attr->ia_gid;
3023                 error = ext4_mark_inode_dirty(handle, inode);
3024                 ext4_journal_stop(handle);
3025         }
3026
3027         if (S_ISREG(inode->i_mode) &&
3028             attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3029                 handle_t *handle;
3030
3031                 handle = ext4_journal_start(inode, 3);
3032                 if (IS_ERR(handle)) {
3033                         error = PTR_ERR(handle);
3034                         goto err_out;
3035                 }
3036
3037                 error = ext4_orphan_add(handle, inode);
3038                 EXT4_I(inode)->i_disksize = attr->ia_size;
3039                 rc = ext4_mark_inode_dirty(handle, inode);
3040                 if (!error)
3041                         error = rc;
3042                 ext4_journal_stop(handle);
3043         }
3044
3045         rc = inode_setattr(inode, attr);
3046
3047         /* If inode_setattr's call to ext4_truncate failed to get a
3048          * transaction handle at all, we need to clean up the in-core
3049          * orphan list manually. */
3050         if (inode->i_nlink)
3051                 ext4_orphan_del(NULL, inode);
3052
3053         if (!rc && (ia_valid & ATTR_MODE))
3054                 rc = ext4_acl_chmod(inode);
3055
3056 err_out:
3057         ext4_std_error(inode->i_sb, error);
3058         if (!error)
3059                 error = rc;
3060         return error;
3061 }
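
/*
 * An illustrative timeline of the shrinking-truncate guarantee
 * described above (a sketch of the steps already taken in
 * ext4_setattr(), not extra code):
 *
 *      ext4_orphan_add(handle, inode);         inode now recoverable
 *      EXT4_I(inode)->i_disksize = new_size;   commits see new size
 *      ... flush pages, free blocks ...        may span many commits
 *      ext4_orphan_del(NULL, inode);           truncate complete
 *
 * A crash anywhere in the middle re-runs the truncate during orphan
 * recovery, so no commit leaves freed blocks visible past i_disksize.
 */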
3062
3063
3064 /*
3065  * How many blocks doth make a writepage()?
3066  *
3067  * With N blocks per page, it may be:
3068  * N data blocks
3069  * 2 indirect blocks
3070  * 2 dindirect
3071  * 1 tindirect
3072  * N+5 bitmap blocks (from the above)
3073  * N+5 group descriptor summary blocks
3074  * 1 inode block
3075  * 1 superblock.
3076  * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
3077  *
3078  * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
3079  *
3080  * With ordered or writeback data it's the same, less the N data blocks.
3081  *
3082  * If the inode's direct blocks can hold an integral number of pages then a
3083  * page cannot straddle two indirect blocks, and we can only touch one indirect
3084  * and dindirect block, and the "5" above becomes "3".
3085  *
3086  * This still overestimates under most circumstances.  If we were to pass the
3087  * start and end offsets in here as well we could do block_to_path() on each
3088  * block and work out the exact number of indirects which are touched.  Pah.
3089  */
3090
3091 int ext4_writepage_trans_blocks(struct inode *inode)
3092 {
3093         int bpp = ext4_journal_blocks_per_page(inode);
3094         int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
3095         int ret;
3096
3097         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3098                 return ext4_ext_writepage_trans_blocks(inode, bpp);
3099
3100         if (ext4_should_journal_data(inode))
3101                 ret = 3 * (bpp + indirects) + 2;
3102         else
3103                 ret = 2 * (bpp + indirects) + 2;
3104
3105 #ifdef CONFIG_QUOTA
3106         /* We know that the structure was already allocated during DQUOT_INIT so
3107          * we will be updating only the data blocks + inodes */
3108         ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
3109 #endif
3110
3111         return ret;
3112 }
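
/*
 * A worked example of the estimate above, assuming 4k pages and 1k
 * blocks (so bpp = 4): EXT4_NDIR_BLOCKS (12) is a multiple of 4, so
 * indirects = 3, and the reservation is 2 * (4 + 3) + 2 = 16 credits
 * for ordered/writeback data, or 3 * (4 + 3) + 2 = 23 for full data
 * journaling, plus the quota blocks when CONFIG_QUOTA is enabled.
 */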
3113
3114 /*
3115  * The caller must have previously called ext4_reserve_inode_write().
3116  * Given this, we know that the caller already has write access to iloc->bh.
3117  */
3118 int ext4_mark_iloc_dirty(handle_t *handle,
3119                 struct inode *inode, struct ext4_iloc *iloc)
3120 {
3121         int err = 0;
3122
3123         /* the do_update_inode consumes one bh->b_count */
3124         get_bh(iloc->bh);
3125
3126         /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
3127         err = ext4_do_update_inode(handle, inode, iloc);
3128         put_bh(iloc->bh);
3129         return err;
3130 }
3131
3132 /*
3133  * On success, we end up with an outstanding reference count against
3134  * iloc->bh.  This _must_ be cleaned up later.
3135  */
3136
3137 int
3138 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
3139                          struct ext4_iloc *iloc)
3140 {
3141         int err = 0;
3142         if (handle) {
3143                 err = ext4_get_inode_loc(inode, iloc);
3144                 if (!err) {
3145                         BUFFER_TRACE(iloc->bh, "get_write_access");
3146                         err = ext4_journal_get_write_access(handle, iloc->bh);
3147                         if (err) {
3148                                 brelse(iloc->bh);
3149                                 iloc->bh = NULL;
3150                         }
3151                 }
3152         }
3153         ext4_std_error(inode->i_sb, err);
3154         return err;
3155 }
3156
3157 /*
3158  * Expand an inode by new_extra_isize bytes.
3159  * Returns 0 on success or negative error number on failure.
3160  */
3161 int ext4_expand_extra_isize(struct inode *inode, unsigned int new_extra_isize,
3162                         struct ext4_iloc iloc, handle_t *handle)
3163 {
3164         struct ext4_inode *raw_inode;
3165         struct ext4_xattr_ibody_header *header;
3166         struct ext4_xattr_entry *entry;
3167
3168         if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
3169                 return 0;
3170
3171         raw_inode = ext4_raw_inode(&iloc);
3172
3173         header = IHDR(inode, raw_inode);
3174         entry = IFIRST(header);
3175
3176         /* No extended attributes present */
3177         if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
3178                 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
3179                 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
3180                         new_extra_isize);
3181                 EXT4_I(inode)->i_extra_isize = new_extra_isize;
3182                 return 0;
3183         }
3184
3185         /* try to expand with EAs present */
3186         return ext4_expand_extra_isize_ea(inode, new_extra_isize,
3187                                           raw_inode, handle);
3188 }
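
/*
 * Sketch of the large-inode layout the expansion above manipulates
 * (for illustration; the boundaries follow from the checks in the
 * code):
 *
 *      +--------------------------------+ 0
 *      | good old 128-byte inode        |
 *      +--------------------------------+ EXT4_GOOD_OLD_INODE_SIZE
 *      | extra fields (i_extra_isize)   |
 *      +--------------------------------+ ... + i_extra_isize
 *      | in-inode xattrs (h_magic, ...) |
 *      +--------------------------------+ EXT4_INODE_SIZE
 *
 * With no xattrs the gap can simply be zeroed and i_extra_isize grown;
 * otherwise ext4_expand_extra_isize_ea() has to shift entries down.
 */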
3189
3190 /*
3191  * What we do here is to mark the in-core inode as clean with respect to inode
3192  * dirtiness (it may still be data-dirty).
3193  * This means that the in-core inode may be reaped by prune_icache
3194  * without having to perform any I/O.  This is a very good thing,
3195  * because *any* task may call prune_icache - even ones which
3196  * have a transaction open against a different journal.
3197  *
3198  * Is this cheating?  Not really.  Sure, we haven't written the
3199  * inode out, but prune_icache isn't a user-visible syncing function.
3200  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3201  * we start and wait on commits.
3202  *
3203  * Is this efficient/effective?  Well, we're being nice to the system
3204  * by cleaning up our inodes proactively so they can be reaped
3205  * without I/O.  But we are potentially leaving up to five seconds'
3206  * worth of inodes floating about which prune_icache wants us to
3207  * write out.  One way to fix that would be to get prune_icache()
3208  * to do a write_super() to free up some memory.  It has the desired
3209  * effect.
3210  */
3211 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
3212 {
3213         struct ext4_iloc iloc;
3214         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3215         static unsigned int mnt_count;
3216         int err, ret;
3217
3218         might_sleep();
3219         err = ext4_reserve_inode_write(handle, inode, &iloc);
3220         if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
3221             !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
3222                 /*
3223                  * We need extra buffer credits since we may write into EA block
3224                  * with this same handle. If journal_extend fails, then it will
3225                  * only result in a minor loss of functionality for that inode.
3226                  * If this is felt to be critical, then e2fsck should be run to
3227                  * force a large enough s_min_extra_isize.
3228                  */
3229                 if ((jbd2_journal_extend(handle,
3230                              EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
3231                         ret = ext4_expand_extra_isize(inode,
3232                                                       sbi->s_want_extra_isize,
3233                                                       iloc, handle);
3234                         if (ret) {
3235                                 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
3236                                 if (mnt_count !=
3237                                         le16_to_cpu(sbi->s_es->s_mnt_count)) {
3238                                         ext4_warning(inode->i_sb, __FUNCTION__,
3239                                         "Unable to expand inode %lu. Delete"
3240                                         " some EAs or run e2fsck.",
3241                                         inode->i_ino);
3242                                         mnt_count =
3243                                           le16_to_cpu(sbi->s_es->s_mnt_count);
3244                                 }
3245                         }
3246                 }
3247         }
3248         if (!err)
3249                 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
3250         return err;
3251 }
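
/*
 * For illustration, the typical call pattern around this function
 * (a sketch; ext4_dirty_inode() below follows the same shape):
 *
 *      handle = ext4_journal_start(inode, credits);
 *      ... modify the in-core inode ...
 *      err = ext4_mark_inode_dirty(handle, inode);
 *      ext4_journal_stop(handle);
 *
 * The copy into the raw inode buffer happens here, which is what lets
 * ext4_write_inode() above safely do nothing for non-waiting callers.
 */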
3252
3253 /*
3254  * ext4_dirty_inode() is called from __mark_inode_dirty()
3255  *
3256  * We're really interested in the case where a file is being extended.
3257  * i_size has been changed by generic_commit_write() and we thus need
3258  * to include the updated inode in the current transaction.
3259  *
3260  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3261  * are allocated to the file.
3262  *
3263  * If the inode is marked synchronous, we don't honour that here - doing
3264  * so would cause a commit on atime updates, which we don't bother doing.
3265  * We handle synchronous inodes at the highest possible level.
3266  */
3267 void ext4_dirty_inode(struct inode *inode)
3268 {
3269         handle_t *current_handle = ext4_journal_current_handle();
3270         handle_t *handle;
3271
3272         handle = ext4_journal_start(inode, 2);
3273         if (IS_ERR(handle))
3274                 goto out;
3275         if (current_handle &&
3276                 current_handle->h_transaction != handle->h_transaction) {
3277                 /* This task has a transaction open against a different fs */
3278                 printk(KERN_EMERG "%s: transactions do not match!\n",
3279                        __FUNCTION__);
3280         } else {
3281                 jbd_debug(5, "marking dirty.  outer handle=%p\n",
3282                                 current_handle);
3283                 ext4_mark_inode_dirty(handle, inode);
3284         }
3285         ext4_journal_stop(handle);
3286 out:
3287         return;
3288 }
3289
3290 #if 0
3291 /*
3292  * Bind an inode's backing buffer_head into this transaction, to prevent
3293  * it from being flushed to disk early.  Unlike
3294  * ext4_reserve_inode_write, this leaves behind no bh reference and
3295  * returns no iloc structure, so the caller needs to repeat the iloc
3296  * lookup to mark the inode dirty later.
3297  */
3298 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
3299 {
3300         struct ext4_iloc iloc;
3301
3302         int err = 0;
3303         if (handle) {
3304                 err = ext4_get_inode_loc(inode, &iloc);
3305                 if (!err) {
3306                         BUFFER_TRACE(iloc.bh, "get_write_access");
3307                         err = jbd2_journal_get_write_access(handle, iloc.bh);
3308                         if (!err)
3309                                 err = ext4_journal_dirty_metadata(handle,
3310                                                                   iloc.bh);
3311                         brelse(iloc.bh);
3312                 }
3313         }
3314         ext4_std_error(inode->i_sb, err);
3315         return err;
3316 }
3317 #endif
3318
3319 int ext4_change_inode_journal_flag(struct inode *inode, int val)
3320 {
3321         journal_t *journal;
3322         handle_t *handle;
3323         int err;
3324
3325         /*
3326          * We have to be very careful here: changing a data block's
3327          * journaling status dynamically is dangerous.  If we write a
3328          * data block to the journal, change the status and then delete
3329          * that block, we risk forgetting to revoke the old log record
3330          * from the journal and so a subsequent replay can corrupt data.
3331          * So, first we make sure that the journal is empty and that
3332          * nobody is changing anything.
3333          */
3334
3335         journal = EXT4_JOURNAL(inode);
3336         if (is_journal_aborted(journal))
3337                 return -EROFS;
3338
3339         jbd2_journal_lock_updates(journal);
3340         jbd2_journal_flush(journal);
3341
3342         /*
3343          * OK, there are no updates running now, and all cached data is
3344          * synced to disk.  We are now in a completely consistent state
3345          * which doesn't have anything in the journal, and we know that
3346          * no filesystem updates are running, so it is safe to modify
3347          * the inode's in-core data-journaling state flag now.
3348          */
3349
3350         if (val)
3351                 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
3352         else
3353                 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
3354         ext4_set_aops(inode);
3355
3356         jbd2_journal_unlock_updates(journal);
3357
3358         /* Finally we can mark the inode as dirty. */
3359
3360         handle = ext4_journal_start(inode, 1);
3361         if (IS_ERR(handle))
3362                 return PTR_ERR(handle);
3363
3364         err = ext4_mark_inode_dirty(handle, inode);
3365         handle->h_sync = 1;
3366         ext4_journal_stop(handle);
3367         ext4_std_error(inode->i_sb, err);
3368
3369         return err;
3370 }
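
/*
 * The hazard the journal flush above closes off, as a timeline
 * (illustrative only):
 *
 *      1. block B written to the journal as data   (data journaling on)
 *      2. inode flag flipped to non-journalled
 *      3. B freed and reallocated, no revoke issued
 *      4. crash; replay rewrites the stale copy of B
 *
 * Because jbd2_journal_flush() empties the journal while
 * jbd2_journal_lock_updates() holds off new transactions, step 1 can
 * never still be pending when the flag changes.
 */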