/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/ext4_jbd2.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <asm/byteorder.h>

#include "xattr.h"
#include "acl.h"
#include "group.h"
/*
 * ialloc.c contains the inodes allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */
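
/*
 * Editorial sketch (not part of the original file): inode numbers are
 * 1-based and laid out group after group, so the routines below map an
 * inode number to its block group and to its bit within that group's
 * inode bitmap with two divisions:
 */
#if 0	/* illustration only */
	ext4_group_t block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	unsigned long bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
#endif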
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
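
/*
 * Worked example (editorial): with start_bit = 27, ((27 + 7) & ~7UL) is 32,
 * so bits 27..31 are set one at a time with the endian-safe ext4_set_bit(),
 * and the remaining whole bytes up to end_bit are then filled by a single
 * memset() of 0xff, avoiding thousands of atomic bit operations.
 */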
/* Initializes an uninitialized inode bitmap */
unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
				ext4_group_t block_group,
				struct ext4_group_desc *gdp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If checksum is bad mark all blocks and inodes used to prevent
	 * allocation, essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
		ext4_error(sb, __FUNCTION__, "Checksum bad for group %lu\n",
			   block_group);
		gdp->bg_free_blocks_count = 0;
		gdp->bg_free_inodes_count = 0;
		gdp->bg_itable_unused = 0;
		memset(bh->b_data, 0xff, sb->s_blocksize);
		return 0;
	}

	memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
	mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), EXT4_BLOCKS_PER_GROUP(sb),
			bh->b_data);

	return EXT4_INODES_PER_GROUP(sb);
}
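
/*
 * Editorial note: mark_bitmap_end() above pads the tail of the bitmap
 * block beyond the last valid inode with 1s, so the padding can never be
 * mistaken for free inodes by a bitmap scan.
 */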
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success or NULL.
 */
static struct buffer_head *
read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh = NULL;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		goto error_out;
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
		bh = sb_getblk(sb, ext4_inode_bitmap(sb, desc));
		if (!buffer_uptodate(bh)) {
			lock_buffer(bh);
			if (!buffer_uptodate(bh)) {
				ext4_init_inode_bitmap(sb, bh, block_group,
						       desc);
				set_buffer_uptodate(bh);
			}
			unlock_buffer(bh);
		}
	} else {
		bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
	}
	if (!bh)
		ext4_error(sb, "read_inode_bitmap",
			    "Cannot read inode bitmap - "
			    "block_group = %lu, inode_bitmap = %llu",
			    block_group, ext4_inode_bitmap(sb, desc));
error_out:
	return bh;
}
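
/*
 * Editorial note: the sb_getblk() path above is a double-checked pattern:
 * buffer_uptodate() is tested again under lock_buffer(), so only one task
 * constructs the bitmap for an EXT4_BG_INODE_UNINIT group while racing
 * readers wait on the buffer lock and then reuse the result.
 */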
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext4_free_inode (handle_t *handle, struct inode * inode)
{
	struct super_block * sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc * gdp;
	struct ext4_super_block * es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err;

	if (atomic_read(&inode->i_count) > 1) {
		printk ("ext4_free_inode: inode has count=%d\n",
					atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		printk ("ext4_free_inode: inode has nlink=%d\n",
			inode->i_nlink);
		return;
	}
	if (!sb) {
		printk("ext4_free_inode: inode on nonexistent device\n");
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug ("freeing inode %lu\n", ino);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_INIT(inode);
	ext4_xattr_delete_inode(handle, inode);
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	clear_inode (inode);

	es = EXT4_SB(sb)->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error (sb, "ext4_free_inode",
			    "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh)
		goto error_return;

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	/* Ok, now we can actually update the inode bitmaps.. */
	if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
					bit, bitmap_bh->b_data))
		ext4_error (sb, "ext4_free_inode",
			    "bit already cleared for inode %lu", ino);
	else {
		gdp = ext4_get_group_desc (sb, block_group, &bh2);

		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
		if (fatal) goto error_return;

		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			gdp->bg_free_inodes_count = cpu_to_le16(
				le16_to_cpu(gdp->bg_free_inodes_count) + 1);
			if (is_directory)
				gdp->bg_used_dirs_count = cpu_to_le16(
				  le16_to_cpu(gdp->bg_used_dirs_count) - 1);
			gdp->bg_checksum = ext4_group_desc_csum(sbi,
							block_group, gdp);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
				percpu_counter_dec(&sbi->s_dirs_counter);
		}
		BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
		err = ext4_journal_dirty_metadata(handle, bh2);
		if (!fatal) fatal = err;
	}
	BUFFER_TRACE(bitmap_bh, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bitmap_bh);
	if (!fatal)
		fatal = err;
	sb->s_dirt = 1;
error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}
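
/*
 * Editorial summary of the ordering above: quotas are released first,
 * clear_inode() then removes the in-memory inode, and only afterwards is
 * the bitmap bit cleared; once the bit is clear, ext4_new_inode() may hand
 * the same inode number to a new file at any moment.
 */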
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
static int find_group_dir(struct super_block *sb, struct inode *parent,
				ext4_group_t *best_group)
{
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	unsigned int freei, avefreei;
	struct ext4_group_desc *desc, *best_desc = NULL;
	ext4_group_t group;
	int ret = -1;

	freei = percpu_counter_read_positive(&EXT4_SB(sb)->s_freeinodes_counter);
	avefreei = freei / ngroups;

	for (group = 0; group < ngroups; group++) {
		desc = ext4_get_group_desc (sb, group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
			continue;
		if (!best_desc ||
		    (le16_to_cpu(desc->bg_free_blocks_count) >
		     le16_to_cpu(best_desc->bg_free_blocks_count))) {
			*best_group = group;
			best_desc = desc;
			ret = 0;
		}
	}
	return ret;
}
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inode and free block counts
 * no worse than average, we return the one with the smallest directory
 * count.  Otherwise we simply return a random group.
 *
 * The remaining rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free blocks left (min_blocks) or
 * it's already running too large a debt (max_debt).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest.  If none
 * of the groups looks good we just look for a group with more
 * free inodes than average (starting at the parent's group).
 *
 * Debt is incremented each time we allocate a directory and decremented
 * when we allocate an inode, within 0--255.
 */

#define INODE_COST 64
#define BLOCK_COST 256
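
/*
 * Worked example (editorial; figures are illustrative, not from the
 * source): with 32768 blocks and 8192 inodes per group, avefreei = 4096
 * and an average of 400 blocks per directory:
 *
 *	min_inodes = 4096 - 8192/4          = 2048
 *	min_blocks = avefreeb - 32768/4
 *	max_debt   = 32768 / max(400, 256)  = 81
 *
 * and since 81 * INODE_COST = 5184 does not exceed 8192, the cap of
 * inodes_per_group / INODE_COST = 128 is not applied.
 */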
static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	ext4_group_t ngroups = sbi->s_groups_count;
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei;
	ext4_fsblk_t freeb, avefreeb;
	ext4_fsblk_t blocks_per_dir;
	unsigned int ndirs;
	int max_debt, max_dirs, min_inodes;
	ext4_grpblk_t min_blocks;
	ext4_group_t i;
	struct ext4_group_desc *desc;

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	avefreeb = freeb;
	do_div(avefreeb, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if ((parent == sb->s_root->d_inode) ||
	    (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
		int best_ndir = inodes_per_group;
		ext4_group_t grp;
		int ret = -1;

		get_random_bytes(&grp, sizeof(grp));
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			grp = (parent_group + i) % ngroups;
			desc = ext4_get_group_desc(sb, grp, NULL);
			if (!desc || !desc->bg_free_inodes_count)
				continue;
			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
				continue;
			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
				continue;
			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
				continue;
			*group = grp;
			ret = 0;
			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
		}
		if (ret == 0)
			return ret;
		goto fallback;
	}

	blocks_per_dir = ext4_blocks_count(es) - freeb;
	do_div(blocks_per_dir, ndirs);

	max_dirs = ndirs / ngroups + inodes_per_group / 16;
	min_inodes = avefreei - inodes_per_group / 4;
	min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;

	max_debt = EXT4_BLOCKS_PER_GROUP(sb);
	max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
	if (max_debt * INODE_COST > inodes_per_group)
		max_debt = inodes_per_group / INODE_COST;
	if (max_debt > 255)
		max_debt = 255;
	if (max_debt == 0)
		max_debt = 1;

	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (!desc || !desc->bg_free_inodes_count)
			continue;
		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
			continue;
		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
			continue;
		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
			continue;
		return 0;
	}

fallback:
	for (i = 0; i < ngroups; i++) {
		*group = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && desc->bg_free_inodes_count &&
			le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
			return 0;
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback;
	}

	return -1;
}
static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
	struct ext4_group_desc *desc;
	ext4_group_t i;

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
			le16_to_cpu(desc->bg_free_blocks_count))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
				le16_to_cpu(desc->bg_free_blocks_count))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
			return 0;
	}

	return -1;
}
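
/*
 * Editorial note on the quadratic hash above: the step i doubles each
 * iteration, so after the hashed start g the probe sequence is
 * g+1, g+3, g+7, g+15, ... (mod ngroups), i.e. offsets of 2^k - 1, which
 * scatters retries across the filesystem instead of scanning linearly.
 */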
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, the one with the fewest
 * directories is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t group = 0;
	unsigned long ino = 0;
	struct inode * inode;
	struct ext4_group_desc * gdp = NULL;
	struct ext4_super_block * es;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err = 0;
	struct inode *ret;
	ext4_group_t i;
	int free = 0;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	sbi = EXT4_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode)) {
		if (test_opt (sb, OLDALLOC))
			ret2 = find_group_dir(sb, dir, &group);
		else
			ret2 = find_group_orlov(sb, dir, &group);
	} else
		ret2 = find_group_other(sb, dir, &group);

	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &bh2);
		if (!gdp)
			goto fail;

		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext4_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT4_INODES_PER_GROUP(sb), ino);
		if (ino < EXT4_INODES_PER_GROUP(sb)) {

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext4_journal_get_write_access(handle, bitmap_bh);
			if (err)
				goto fail;

			if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext4_journal_dirty_metadata");
				err = ext4_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			jbd2_journal_release_buffer(handle, bitmap_bh);

			if (++ino < EXT4_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino++;
	if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
	    ino > EXT4_INODES_PER_GROUP(sb)) {
		ext4_error(sb, __FUNCTION__,
			   "reserved inode or inode > inodes count - "
			   "block_group = %lu, inode=%lu", group,
			   ino + group * EXT4_INODES_PER_GROUP(sb));
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext4_journal_get_write_access(handle, bh2);
	if (err) goto fail;
	/* We may have to initialize the block bitmap if it isn't already */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bh = read_block_bitmap(sb, group);

		BUFFER_TRACE(block_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bh);
		if (err) {
			brelse(block_bh);
			goto fail;
		}

		free = 0;
		spin_lock(sb_bgl_lock(sbi, group));
		/* recheck and clear flag under lock if we still need to */
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_free_blocks_count = cpu_to_le16(free);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

		/* Don't need to dirty bitmap block if we didn't change it */
		if (free) {
			BUFFER_TRACE(block_bh, "dirty block bitmap");
			err = ext4_journal_dirty_metadata(handle, block_bh);
		}

		brelse(block_bh);
		if (err)
			goto fail;
	}
	spin_lock(sb_bgl_lock(sbi, group));
	/* If we didn't allocate from within the initialized part of the inode
	 * table then we need to initialize up to this inode. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			/* When marking the block group with
			 * ~EXT4_BG_INODE_UNINIT we don't want to depend
			 * on the value of bg_itable_unused even though
			 * mke2fs could have initialized the same for us.
			 * Instead we calculate the value below.
			 */
			free = 0;
		} else {
			free = EXT4_INODES_PER_GROUP(sb) -
				le16_to_cpu(gdp->bg_itable_unused);
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
			gdp->bg_itable_unused =
				cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
	}
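
	/*
	 * Worked example (editorial): with 8192 inodes per group and
	 * bg_itable_unused = 8000, the initialized region covers relative
	 * inodes 1..192.  Allocating relative inode ino = 200 therefore
	 * stores 8192 - 200 = 7992 in bg_itable_unused.
	 */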
	gdp->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode)) {
		gdp->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	}
	gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
	err = ext4_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;
	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
						       ext4_current_time(inode);

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	ei->i_flags = EXT4_I(dir)->i_flags & ~EXT4_INDEX_FL;
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT4_DIRSYNC_FL;
	ei->i_file_acl = 0;
	ei->i_dtime = 0;
	ei->i_block_alloc_info = NULL;
	ei->i_block_group = group;

	ext4_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT4_STATE_NEW;
	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;

	ret = inode;
	if (DQUOT_ALLOC_INODE(inode)) {
		err = -EDQUOT;
		goto fail_drop;
	}

	err = ext4_init_acl(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_init_security(handle, inode, dir);
	if (err)
		goto fail_free_drop;

	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_std_error(sb, err);
		goto fail_free_drop;
	}
	if (test_opt(sb, EXTENTS)) {
		EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
		ext4_ext_tree_init(handle, inode);
		err = ext4_update_incompat_feature(handle, sb,
						EXT4_FEATURE_INCOMPAT_EXTENTS);
		if (err)
			goto fail;
	}

	ext4_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext4_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail_free_drop:
	DQUOT_FREE_INODE(inode);

fail_drop:
	DQUOT_DROP(inode);
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
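
/*
 * Rough caller sketch (editorial, modelled on the create paths in namei.c;
 * the extra credit count of 3 is an illustrative assumption):
 */
#if 0	/* illustration only */
	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + 3);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	inode = ext4_new_inode(handle, dir, mode);
	if (IS_ERR(inode)) {
		ext4_journal_stop(handle);
		return PTR_ERR(inode);
	}
	/* ... set i_op/i_fop and add the directory entry here ... */
	ext4_journal_stop(handle);
#endif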
/* Verify that we are loading a valid orphan from disk */
struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	ext4_group_t block_group;
	int bit;
	struct buffer_head *bitmap_bh = NULL;
	struct inode *inode = NULL;

	/* Error cases - e2fsck has already cleaned up for us */
	if (ino > max_ino) {
		ext4_warning(sb, __FUNCTION__,
			     "bad orphan ino %lu!  e2fsck was run?", ino);
		goto out;
	}

	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	bitmap_bh = read_inode_bitmap(sb, block_group);
	if (!bitmap_bh) {
		ext4_warning(sb, __FUNCTION__,
			     "inode bitmap error for orphan %lu", ino);
		goto out;
	}

	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data) ||
	    !(inode = iget(sb, ino)) || is_bad_inode(inode) ||
	    NEXT_ORPHAN(inode) > max_ino) {
		ext4_warning(sb, __FUNCTION__,
			     "bad orphan inode %lu!  e2fsck was run?", ino);
		printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
		       bit, (unsigned long long)bitmap_bh->b_blocknr,
		       ext4_test_bit(bit, bitmap_bh->b_data));
		printk(KERN_NOTICE "inode=%p\n", inode);
		if (inode) {
			printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
			       is_bad_inode(inode));
			printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
			       NEXT_ORPHAN(inode));
		}
		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
		/* Avoid freeing blocks if we got a bad deleted inode */
		if (inode && inode->i_nlink == 0)
			inode->i_blocks = 0;
		iput(inode);
		inode = NULL;
	}
out:
	brelse(bitmap_bh);
	return inode;
}
unsigned long ext4_count_free_inodes (struct super_block * sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, i);
		if (!bitmap_bh)
			continue;

		x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk("ext4_count_free_inodes: stored = %u, computed = %lu, %lu\n",
		le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		gdp = ext4_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
		cond_resched();
	}
	return desc_count;
#endif
}
/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs (struct super_block * sb)
{
	unsigned long count = 0;
	ext4_group_t i;

	for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc (sb, i, NULL);
		if (!gdp)
			continue;
		count += le16_to_cpu(gdp->bg_used_dirs_count);
	}
	return count;
}