/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "group.h"
/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
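
/*
 * Worked example for mark_bitmap_end() (illustrative numbers, not from
 * any particular configuration): with start_bit = 10 and end_bit = 32,
 * the loop sets bits 10..15 one at a time with the endian-safe
 * ext4_set_bit(), bringing i up to the byte boundary at bit 16; the
 * memset() then fills bytes 2 and 3 ((32 - 16) >> 3 = 2 bytes) with
 * 0xff in a single call.
 */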
/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes in use to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __func__, "Checksum bad for group %u",
			   block_group);
		ext4_free_blks_set(sb, gdp, 0);
		ext4_free_inodes_set(sb, gdp, 0);
		ext4_itable_unused_set(sb, gdp, 0);
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
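
/*
 * Sizing note (illustrative numbers, not taken from a specific config):
 * with 4KB blocks a bitmap block holds 32768 bits, and a common layout
 * has 8192 inodes per group.  ext4_init_inode_bitmap() then zeroes the
 * first (8192 + 7) / 8 = 1024 bytes and relies on mark_bitmap_end() to
 * mark bits 8192..32767 in use, so the unused tail of the bitmap block
 * can never be handed out as an inode.
 */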
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_inode_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, __func__,
			   "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	if (buffer_uptodate(bh) &&
	    !(desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)))
		return bh;

	lock_buffer(bh);
	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		ext4_init_inode_bitmap(sb, bh, block_group, desc);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
		return bh;
	}
	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
	if (bh_submit_read(bh) < 0) {
		put_bh(bh);
		ext4_error(sb, __func__,
			   "Cannot read inode bitmap - "
			   "block_group = %u, inode_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}
	return bh;
}
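
/*
 * Locking note (a summary of the code above, not new behaviour): the
 * buffer_uptodate() test is done lock-free for the common case; when
 * the group may still carry EXT4_BG_INODE_UNINIT, that flag is
 * re-tested under sb_bgl_lock() with the buffer locked, so a fresh
 * bitmap is only ever constructed while holding the group lock.
 */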
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
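
/*
 * Sketch of the ordering enforced below (a restatement of the comment
 * above, not additional behaviour): ext4_free_inode() itself performs
 *
 *	clear_inode(inode);                        first, kill aliases
 *	ext4_clear_bit_atomic(lock, bit, bitmap);  then free the number
 *
 * so the inode number cannot be re-allocated while an in-core alias of
 * the old inode still exists.
 */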
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count;
	ext4_group_t flex_group;

	if (atomic_read(&inode->i_count) > 1) {
		printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
		       atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk(KERN_ERR "ext4_free_inode: inode has nlink=%d\n",
		       inode->i_nlink);
		return;
	}
	if (!sb) {
		printk(KERN_ERR "ext4_free_inode: inode on "
		       "nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	ext4_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode(inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "ext4_free_inode",
			   "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
				   bit, bitmap_bh->b_data))
		ext4_error(sb, "ext4_free_inode",
			   "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc(sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal)
			goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			count = ext4_free_inodes_count(sb, gdp) + 1;
			ext4_free_inodes_set(sb, gdp, count);
			if (is_directory) {
				count = ext4_used_dirs_count(sb, gdp) - 1;
				ext4_used_dirs_set(sb, gdp, count);
			}
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);

			if (sbi->s_log_groups_per_flex) {
				flex_group = ext4_flex_group(sbi, block_group);
				spin_lock(sb_bgl_lock(sbi, flex_group));
				sbi->s_flex_groups[flex_group].free_inodes++;
				spin_unlock(sb_bgl_lock(sbi, flex_group));
			}
		}
		BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bh2);
		if (!fatal)
			fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
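
/*
 * Worked example of the ino -> (group, bit) mapping used above
 * (illustrative numbers): with EXT4_INODES_PER_GROUP() == 8192,
 * inode 8193 maps to block_group = (8193 - 1) / 8192 = 1 and
 * bit = (8193 - 1) % 8192 = 0, i.e. the first slot of group 1.
 * On-disk inode numbers are 1-based, which is why the code subtracts
 * one before dividing.
 */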
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
			  ext4_group_t *best_group)
{
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc(sb, group, NULL);
		if (!desc || !ext4_free_inodes_count(sb, desc))
			continue;
		if (ext4_free_inodes_count(sb, desc) < avefreei)
			continue;
		if (!best_desc ||
		    (ext4_free_blks_count(sb, desc) >
		     ext4_free_blks_count(sb, best_desc))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}

#define free_block_ratio 10
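
/*
 * free_block_ratio is a percentage threshold: find_group_flex() only
 * treats a flex group as "good" when its free blocks exceed this share
 * of blocks_per_flex.  Illustrative numbers (not from any fixed
 * config): with 32768 blocks per group and 16 groups per flex group,
 * blocks_per_flex = 524288, so a candidate needs more than roughly
 * 52428 free blocks to pass the 10% test.
 */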
static int find_group_flex(struct super_block *sb, struct inode *parent,
			   ext4_group_t *best_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	struct flex_groups *flex_group = sbi->s_flex_groups;
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
	ext4_group_t ngroups = sbi->s_groups_count;
	int flex_size = ext4_flex_bg_size(sbi);
	ext4_group_t best_flex = parent_fbg_group;
	int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
	int flexbg_free_blocks;
	int flex_freeb_ratio;
	ext4_group_t n_fbg_groups;
	ext4_group_t i;

	n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
		sbi->s_log_groups_per_flex;

find_close_to_parent:
	flexbg_free_blocks = flex_group[best_flex].free_blocks;
	flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
	if (flex_group[best_flex].free_inodes &&
	    flex_freeb_ratio > free_block_ratio)
		goto found_flexbg;

	if (best_flex && best_flex == parent_fbg_group) {
		best_flex--;
		goto find_close_to_parent;
	}

	for (i = 0; i < n_fbg_groups; i++) {
		if (i == parent_fbg_group || i == parent_fbg_group - 1)
			continue;

		flexbg_free_blocks = flex_group[i].free_blocks;
		flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;

		if (flex_freeb_ratio > free_block_ratio &&
		    flex_group[i].free_inodes) {
			best_flex = i;
			goto found_flexbg;
		}

		if (flex_group[best_flex].free_inodes == 0 ||
		    (flex_group[i].free_blocks >
		     flex_group[best_flex].free_blocks &&
		     flex_group[i].free_inodes))
			best_flex = i;
	}

	if (!flex_group[best_flex].free_inodes ||
	    !flex_group[best_flex].free_blocks)
		return -1;

found_flexbg:
	for (i = best_flex * flex_size; i < ngroups &&
		     i < (best_flex + 1) * flex_size; i++) {
		desc = ext4_get_group_desc(sb, i, &bh);
		if (ext4_free_inodes_count(sb, desc)) {
			*best_group = i;
			goto out;
		}
	}

	return -1;
out:
	return 0;
}
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free blocks counts
 * not worse than average we return the one with the smallest directory
 * count.  Otherwise we simply return a random group.
 *
 * The remaining rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it's already running too large a debt (max_debt).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest.  If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256
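
/*
 * Worked example of the debt limit (illustrative numbers): with
 * EXT4_BLOCKS_PER_GROUP() == 32768 and 8192 inodes per group, if the
 * average directory so far occupies fewer than BLOCK_COST blocks, then
 * max_debt = 32768 / 256 = 128.  Since 128 * INODE_COST == 8192 does
 * not exceed inodes_per_group, the cap stays at 128, well inside the
 * 1..255 range the allocator clamps to below.
 */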
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	ext4_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t i;
	struct ext4_group_desc *desc;

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if ((parent == sb->s_root->d_inode) ||
	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
		int best_ndir = inodes_per_group;
		ext4_group_t grp;
		int ret = -1;

		get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			grp = (parent_group + i) % ngroups;
			desc = ext4_get_group_desc(sb, grp, NULL);
			if (!desc || !ext4_free_inodes_count(sb, desc))
				continue;
			if (ext4_used_dirs_count(sb, desc) >= best_ndir)
				continue;
			if (ext4_free_inodes_count(sb, desc) < avefreei)
				continue;
			if (ext4_free_blks_count(sb, desc) < avefreeb)
				continue;
			*group = grp;
			ret = 0;
			best_ndir = ext4_used_dirs_count(sb, desc);
		}
		if (ret == 0)
			return ret;
		goto fallback;
	}

	blocks_per_dir = ext4_blocks_count(es) - freeb;
	do_div(blocks_per_dir, ndirs);

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;

	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (!desc || !ext4_free_inodes_count(sb, desc))
			continue;
		if (ext4_used_dirs_count(sb, desc) >= max_dirs)
			continue;
		if (ext4_free_inodes_count(sb, desc) < min_inodes)
			continue;
		if (ext4_free_blks_count(sb, desc) < min_blocks)
			continue;
		return 0;
	}

fallback:
	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_inodes_count(sb, desc) >= avefreei)
			return 0;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really
		 * small filesystems the above test can fail to find any
		 * blockgroups.
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}
static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	ext4_group_t i;

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_blks_count(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_blks_count(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}
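
/*
 * Probe-order example for the quadratic hash above (arithmetic only):
 * starting from hash start h, the loop adds i = 1, 2, 4, 8, ... so the
 * groups visited are h+1, h+3, h+7, h+15, ... (mod ngroups), i.e. the
 * cumulative sums of powers of two.  This spreads retries quickly
 * across the filesystem instead of clustering them near the parent.
 */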
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
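
/*
 * Illustrative caller sketch (an assumption for illustration, not a
 * quote of any particular caller in fs/ext4/namei.c): a create-style
 * operation is expected to do roughly
 *
 *	handle = ext4_journal_start(dir, credits);
 *	inode = ext4_new_inode(handle, dir, mode);
 *	if (IS_ERR(inode))
 *		err = PTR_ERR(inode);
 *
 * i.e. the caller owns the transaction handle and must check the
 * ERR_PTR()-encoded result.
 */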
struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_super_block *es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0, count;
	struct inode *ret;
	ext4_group_t i;
	int free = 0;
	ext4_group_t flex_group;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;

	if (sbi->s_log_groups_per_flex) {
		ret2 = find_group_flex(sb, dir, &group);
		goto got_group;
	}

	if (S_ISDIR(mode)) {
		if (test_opt(sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group);
	} else
		ret2 = find_group_other(sb, dir, &group);

got_group:
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto fail;

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		if (!inode_bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
					      inode_bitmap_bh->b_data,
					      EXT4_INODES_PER_GROUP(sb), ino);

		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle,
							    inode_bitmap_bh);
			if (err)
				goto fail;

			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
						 ino,
						 inode_bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(inode_bitmap_bh,
					"call ext4_handle_dirty_metadata");
				err = ext4_handle_dirty_metadata(handle,
								 inode,
							inode_bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			ext4_handle_release_buffer(handle, inode_bitmap_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;
got:
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
	    ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_error(sb, __func__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %u, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err)
		goto fail;

	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_blks_set(sb, gdp, free);
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
			err = ext4_handle_dirty_metadata(handle,
							 NULL, block_bitmap_bh);
		}

		brelse(block_bitmap_bh);
		if (err)
			goto fail;
	}
	spin_lock(sb_bgl_lock(sbi, group));
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);

			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */
			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
			       ext4_itable_unused_count(sb, gdp);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater,
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
	}

	count = ext4_free_inodes_count(sb, gdp) - 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (S_ISDIR(mode)) {
		count = ext4_used_dirs_count(sb, gdp) + 1;
		ext4_used_dirs_set(sb, gdp, count);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
	if (err)
		goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	if (sbi->s_log_groups_per_flex) {
		flex_group = ext4_flex_group(sbi, group);
		spin_lock(sb_bgl_lock(sbi, flex_group));
		sbi->s_flex_groups[flex_group].free_inodes--;
		spin_unlock(sb_bgl_lock(sbi, flex_group));
	}
	inode->i_uid = current_fsuid();
	if (test_opt(sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	/*
	 * Don't inherit extent flag from directory.  We set extent flag on
	 * newly created directory and file only if -o extent mount option is
	 * specified.
	 */
	ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT4_DIRSYNC_FL;
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_group = group;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		ext4_handle_sync(handle);
	if (insert_inode_locked(inode) < 0) {
		err = -EINVAL;
		goto fail_drop;
	}
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;

	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	if (DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	if (test_opt(sb, EXTENTS)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
			EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
			ext4_ext_tree_init(handle, inode);
		}
	}

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(inode_bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	unlock_new_inode(inode);
	iput(inode);
	brelse(inode_bitmap_bh);
	return ERR_PTR(err);
}
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh;
	struct inode *inode = NULL;
	long err = -EIO;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __func__,
			     "bad orphan ino %lu!  e2fsck was run?", ino);
		goto error;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __func__,
			     "inode bitmap error for orphan %lu", ino);
		goto error;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;

	inode = ext4_iget(sb, ino);
	if (IS_ERR(inode))
		goto iget_failed;

	/*
	 * If the orphan has i_nlink > 0 then it should be able to be
	 * truncated, otherwise it won't be removed from the orphan list
	 * during processing and an infinite loop will result.
	 */
	if (inode->i_nlink && !ext4_can_truncate(inode))
		goto bad_orphan;

	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;

iget_failed:
	err = PTR_ERR(inode);
	inode = NULL;
bad_orphan:
	ext4_warning(sb, __func__,
		     "bad orphan inode %lu!  e2fsck was run?", ino);
	printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
	       bit, (unsigned long long)bitmap_bh->b_blocknr,
	       ext4_test_bit(bit, bitmap_bh->b_data));
	printk(KERN_NOTICE "inode=%p\n", inode);
	if (inode) {
		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
		       is_bad_inode(inode));
		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
		       NEXT_ORPHAN(inode));
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
	}
	brelse(bitmap_bh);
error:
	return ERR_PTR(err);
}
unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %lu\n",
			i, ext4_free_inodes_count(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_inodes: "
		"stored = %u, computed = %lu, %lu\n",
		le32_to_cpu(es->s_free_inodes_count),
		desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += ext4_free_inodes_count(sb, gdp);
		cond_resched();
	}
	return desc_count;
#endif
}
/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block *sb)
{
	unsigned long count = 0;
	ext4_group_t i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}