#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <asm/div64.h>
#include "omfs.h"

unsigned long omfs_count_free(struct super_block *sb)
{
	unsigned int i;
	unsigned long sum = 0;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int nbits = sb->s_blocksize * 8;

	for (i = 0; i < sbi->s_imap_size; i++)
		sum += nbits - bitmap_weight(sbi->s_imap[i], nbits);

	return sum;
}

/*
 * Counts the run of zero bits starting at bit up to max.
 * It handles the case where a run might spill over a buffer.
 * Called with bitmap lock.
 */
static int count_run(unsigned long **addr, int nbits,
		int addrlen, int bit, int max)
{
	int count = 0;
	int x;

	for (; addrlen > 0; addrlen--, addr++) {
		x = find_next_bit(*addr, nbits, bit);
		count += x - bit;	/* zero bits up to the next set bit */
		if (x < nbits || count > max)
			return min(count, max);
		bit = 0;	/* run continues into the next buffer */
	}
	return min(count, max);
}

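/*
 * Illustrative example (not from the original source): pretend nbits
 * were only 8 and two consecutive bitmap buffers had bits 0-3 set and
 * bits 4-7 clear in the first, and bits 0-1 clear and bits 2-7 set in
 * the second.  Then count_run(addr, 8, 2, 4, 16) finds no set bit in
 * the rest of the first buffer (4 free bits), wraps to bit 0 of the
 * second buffer and stops at bit 2, returning a run of 6.  In real
 * use nbits is sb->s_blocksize * 8 and max caps the returned length.
 */
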
/*
 * Sets or clears the run of count bits starting with bit.
 * Called with bitmap lock.
 */
static int set_run(struct super_block *sb, int map,
		int nbits, int bit, int count, int set)
{
	int i;
	int err = -ENOMEM;
	struct buffer_head *bh;
	struct omfs_sb_info *sbi = OMFS_SB(sb);

	bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
	if (!bh)
		goto out;

	for (i = 0; i < count; i++, bit++) {
		if (bit >= nbits) {
			/* run crosses into the next bitmap block */
			bit = 0;
			map++;
			mark_buffer_dirty(bh);
			brelse(bh);
			bh = sb_bread(sb,
				clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
			if (!bh)
				goto out;
		}
		if (set) {
			set_bit(bit, sbi->s_imap[map]);
			set_bit(bit, (unsigned long *)bh->b_data);
		} else {
			clear_bit(bit, sbi->s_imap[map]);
			clear_bit(bit, (unsigned long *)bh->b_data);
		}
	}
	mark_buffer_dirty(bh);
	brelse(bh);
	err = 0;
out:
	return err;
}

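/*
 * Illustrative call (assumed numbers, not from the original source):
 * with 2048-byte blocks, nbits is 16384, so set_run(sb, 0, 16384,
 * 16380, 8, 1) sets bits 16380-16383 of bitmap block 0 and bits 0-3
 * of bitmap block 1, keeping the in-core s_imap and the on-disk
 * buffers in sync and marking every touched buffer dirty.
 */
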
/*
 * Tries to allocate exactly one block.  Returns true if successful.
 */
int omfs_allocate_block(struct super_block *sb, u64 block)
{
	struct buffer_head *bh;
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	unsigned int map, bit;
	int ret = 0;
	u64 tmp = block;

	bit = do_div(tmp, bits_per_entry);
	map = tmp;

	mutex_lock(&sbi->s_bitmap_lock);
	if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map]))
		goto out;

	if (sbi->s_bitmap_ino > 0) {
		bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map);
		if (!bh)
			goto out;

		set_bit(bit, (unsigned long *)bh->b_data);
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	ret = 1;
out:
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}

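/*
 * Worked example (illustrative numbers only): with a 2048-byte block
 * size, bits_per_entry is 16384, so block 40000 lands in bitmap entry
 * map = 2 at bit = 7232 (40000 = 2 * 16384 + 7232).  The bit is
 * claimed atomically with test_and_set_bit(), so a block that is
 * already in use makes the function return 0.
 */
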
/*
 * Tries to allocate a set of blocks.  The request size depends on the
 * type: for inodes, we must allocate sbi->s_mirrors blocks, and for file
 * blocks, we try to allocate sbi->s_clustersize, but can always get away
 * with just one block.
 */
int omfs_allocate_range(struct super_block *sb, int min_request,
		int max_request, u64 *return_block, int *return_size)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	int ret = 0;
	int i, run, bit;

	mutex_lock(&sbi->s_bitmap_lock);
	for (i = 0; i < sbi->s_imap_size; i++) {
		bit = 0;
		while (bit < bits_per_entry) {
			bit = find_next_zero_bit(sbi->s_imap[i], bits_per_entry,
				bit);
			if (bit == bits_per_entry)
				break;

			run = count_run(&sbi->s_imap[i], bits_per_entry,
				sbi->s_imap_size-i, bit, max_request);
			if (run >= min_request)
				goto found;
			bit += run;
		}
	}
	ret = -ENOSPC;
	goto out;

found:
	*return_block = (u64) i * bits_per_entry + bit;
	*return_size = run;
	ret = set_run(sb, i, bits_per_entry, bit, run, 1);
out:
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}

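/*
 * Illustrative callers (assumed, based on the comment above): file
 * data might request anything from one block up to a cluster, e.g.
 * omfs_allocate_range(sb, 1, sbi->s_clustersize, &block, &count),
 * while inode allocation needs all of its mirror copies at once, e.g.
 * omfs_allocate_range(sb, sbi->s_mirrors, sbi->s_mirrors, &block, &count).
 */
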
/*
 * Clears count bits starting at a given block.
 */
int omfs_clear_range(struct super_block *sb, u64 block, int count)
{
	struct omfs_sb_info *sbi = OMFS_SB(sb);
	int bits_per_entry = 8 * sb->s_blocksize;
	u64 tmp = block;
	unsigned int map, bit;
	int ret;

	bit = do_div(tmp, bits_per_entry);
	map = tmp;

	if (map >= sbi->s_imap_size)
		return 0;

	mutex_lock(&sbi->s_bitmap_lock);
	ret = set_run(sb, map, bits_per_entry, bit, count, 0);
	mutex_unlock(&sbi->s_bitmap_lock);
	return ret;
}

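/*
 * Illustrative use (assumed caller): an extent is freed by passing the
 * same block/count pair back, e.g. omfs_clear_range(sb, block, count);
 * blocks that fall outside the bitmap are silently ignored.
 */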