2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/sched.h>
17 #include <linux/bitops.h>
18 #include <asm/byteorder.h>
23 #undef UFS_BALLOC_DEBUG
25 #ifdef UFS_BALLOC_DEBUG
26 #define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
31 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
32 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
34 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
35 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
36 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
39 * Free 'count' fragments from fragment number 'fragment'
/*
 * Free 'count' fragments starting at fragment number 'fragment' within one
 * cylinder group: set the corresponding bits in the cg free-fragment bitmap,
 * update the per-cg / per-sb free counters, and — if the whole block becomes
 * free — promote the fragments back to a free block (nbfree++ / nffree -= fpb).
 *
 * NOTE(review): this dump is missing several source lines (the in-file line
 * numbering jumps, e.g. 47->50, 84->87, 118->128); sb assignment, lock
 * handling and closing braces are not visible here — compare against the
 * upstream fs/ufs/balloc.c before editing.
 */
41 void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
42 struct super_block * sb;
43 struct ufs_sb_private_info * uspi;
44 struct ufs_super_block_first * usb1;
45 struct ufs_cg_private_info * ucpi;
46 struct ufs_cylinder_group * ucg;
47 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
50 uspi = UFS_SB(sb)->s_uspi;
51 usb1 = ubh_get_usb_first(USPI_UBH);
53 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Sanity: the freed range must not cross a fragments-per-group boundary. */
55 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
56 ufs_error (sb, "ufs_free_fragments", "internal error");
/* Locate the cylinder group (cgno) and the bit offset inside it. */
60 cgno = ufs_dtog(fragment);
61 bit = ufs_dtogd(fragment);
62 if (cgno >= uspi->s_ncg) {
63 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
67 ucpi = ufs_load_cylinder (sb, cgno);
70 ucg = ubh_get_ucg (UCPI_UBH);
71 if (!ufs_cg_chkmagic(sb, ucg)) {
72 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
76 end_bit = bit + count;
77 bbase = ufs_blknum (bit);
/* Remove the old fragment-size accounting for this block's bitmap byte,
 * flip the freed bits on, then re-account with the new bitmap state. */
78 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
79 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
80 for (i = bit; i < end_bit; i++) {
81 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
82 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
/* Double-free detection: a bit that is already set means the fragment
 * was already free. (Message text says "cleared"; presumably it means
 * "already freed" — worth confirming against upstream.) */
83 else ufs_error (sb, "ufs_free_fragments",
84 "bit already cleared for fragment %u", i);
87 DQUOT_FREE_BLOCK (inode, count);
/* Bump free-fragment counters: cylinder group, superblock total, in-core copy. */
90 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
91 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
92 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
93 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
94 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
/* If every fragment of the containing block is now free, reassemble it:
 * convert fpb free fragments into one free block. */
99 blkno = ufs_fragstoblks (bbase);
100 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
101 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
102 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
103 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
/* Cluster summaries are only maintained on 4.4BSD-flavoured filesystems. */
104 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
105 ufs_clusteracct (sb, ucpi, blkno, 1);
106 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
107 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
108 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
/* Per-cylinder rotational-position bookkeeping for the reassembled block. */
109 cylno = ufs_cbtocylno (bbase);
110 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
111 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Push the modified superblock and cg buffers out; synchronous mounts
 * wait for the cg write to complete. */
114 ubh_mark_buffer_dirty (USPI_UBH);
115 ubh_mark_buffer_dirty (UCPI_UBH);
116 if (sb->s_flags & MS_SYNCHRONOUS) {
117 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
118 ubh_wait_on_buffer (UCPI_UBH);
128 UFSD(("EXIT (FAILED)\n"))
133 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
/*
 * Free 'count' fragments starting at 'fragment', where both the start and the
 * count are whole-block aligned: each fpb-sized chunk is returned to the free
 * map as a complete block (nbfree++), never as loose fragments.
 *
 * NOTE(review): lines are missing from this dump (jumps 141->144, 151->159,
 * 167->172, 203->219) — sb assignment, the overflow retry/goto logic, error
 * returns and closing braces are not visible; consult upstream before editing.
 */
135 void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
136 struct super_block * sb;
137 struct ufs_sb_private_info * uspi;
138 struct ufs_super_block_first * usb1;
139 struct ufs_cg_private_info * ucpi;
140 struct ufs_cylinder_group * ucg;
141 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
144 uspi = UFS_SB(sb)->s_uspi;
145 usb1 = ubh_get_usb_first(USPI_UBH);
147 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Both start and count must be block-aligned (no stray fragment bits). */
149 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
150 ufs_error (sb, "ufs_free_blocks", "internal error, "
151 "fragment %u, count %u\n", fragment, count);
159 cgno = ufs_dtog (fragment);
160 bit = ufs_dtogd (fragment);
161 if (cgno >= uspi->s_ncg) {
162 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
165 end_bit = bit + count;
/* The range may spill past this cylinder group; 'overflow' holds the excess
 * (handled by code missing from this dump — presumably a loop back for the
 * next cg; confirm against upstream). */
166 if (end_bit > uspi->s_fpg) {
167 overflow = bit + count - uspi->s_fpg;
172 ucpi = ufs_load_cylinder (sb, cgno);
175 ucg = ubh_get_ucg (UCPI_UBH);
176 if (!ufs_cg_chkmagic(sb, ucg)) {
177 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* Walk the range one block (fpb fragments) at a time. */
181 for (i = bit; i < end_bit; i += uspi->s_fpb) {
182 blkno = ufs_fragstoblks(i);
/* Double-free detection: the block must not already be marked free. */
183 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
184 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
186 ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
/* Cluster summaries only exist on 4.4BSD-flavoured filesystems. */
187 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
188 ufs_clusteracct (sb, ucpi, blkno, 1);
189 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
/* Free-block counters: cylinder group, superblock total, in-core copy. */
191 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
192 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
193 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
194 cylno = ufs_cbtocylno(i);
195 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
196 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Flush dirty metadata; synchronous mounts wait for the cg buffer. */
199 ubh_mark_buffer_dirty (USPI_UBH);
200 ubh_mark_buffer_dirty (UCPI_UBH);
201 if (sb->s_flags & MS_SYNCHRONOUS) {
202 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
203 ubh_wait_on_buffer (UCPI_UBH);
219 UFSD(("EXIT (FAILED)\n"))
/*
 * NULLIFY_FRAGMENTS: zero-fill the newly allocated fragments [oldcount,
 * newcount) of 'result' through the buffer cache, marking each buffer
 * up-to-date and dirty, and writing it synchronously when the inode is
 * IS_SYNC. Expects sb, result, oldcount, newcount, i, bh and inode in the
 * expanding scope. NOTE(review): the macro's closing lines (brelse/loop end)
 * are not visible in this dump — do not edit without the full source.
 * (Comments cannot be placed inside the backslash-continued body.)
 */
225 #define NULLIFY_FRAGMENTS \
226 for (i = oldcount; i < newcount; i++) { \
227 bh = sb_getblk(sb, result + i); \
228 memset (bh->b_data, 0, sb->s_blocksize); \
229 set_buffer_uptodate(bh); \
230 mark_buffer_dirty (bh); \
231 if (IS_SYNC(inode)) \
232 sync_dirty_buffer(bh); \
/*
 * Allocate 'count' fragments for 'inode' at logical fragment 'fragment',
 * preferring physical fragment 'goal'. *p is the on-disk (fs-endian) block
 * pointer being filled/extended. Three strategies, tried in order:
 *   1. extend an existing partial allocation in place (ufs_add_fragments),
 *   2. fresh allocation when *p is empty (ufs_alloc_fragments),
 *   3. allocate elsewhere and migrate the old fragments' data there.
 * Returns the physical fragment number, or (per the visible failure paths)
 * reports via *err. NOTE(review): many lines are absent from this dump
 * (lock/unlock, several returns, switch labels, brelse calls); the control
 * flow below is partial — verify against upstream before changing anything.
 */
236 unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
237 unsigned goal, unsigned count, int * err )
239 struct super_block * sb;
240 struct ufs_sb_private_info * uspi;
241 struct ufs_super_block_first * usb1;
242 struct buffer_head * bh;
243 unsigned cgno, oldcount, newcount, tmp, request, i, result;
245 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
248 uspi = UFS_SB(sb)->s_uspi;
249 usb1 = ubh_get_usb_first(USPI_UBH);
/* tmp = current physical fragment already recorded in *p (0 if none). */
254 tmp = fs32_to_cpu(sb, *p);
/* Clamp a request that would cross a block boundary. */
255 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
256 ufs_warning (sb, "ufs_new_fragments", "internal warning"
257 " fragment %u, count %u", fragment, count);
258 count = uspi->s_fpb - ufs_fragnum(fragment);
260 oldcount = ufs_fragnum (fragment);
261 newcount = oldcount + count;
/*
 * Somebody else has just allocated our fragments — the visible error below
 * fires on an inconsistency between *p and the expected state.
 */
268 ufs_error (sb, "ufs_new_fragments", "internal error, "
269 "fragment %u, tmp %u\n", fragment, tmp);
273 if (fragment < UFS_I(inode)->i_lastfrag) {
274 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
281 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
/*
 * Reserve UFS_MINFREE percent for root: unprivileged allocations fail when
 * free space is at/below the reserve.
 */
290 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
292 UFSD(("EXIT (FAILED)\n"))
/* Pick a starting cylinder group: from the goal if valid, else by inode. */
296 if (goal >= uspi->s_size)
299 cgno = ufs_inotocg (inode->i_ino);
301 cgno = ufs_dtog (goal);
/*
 * Case: *p empty — allocate a brand-new run of fragments.
 */
307 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
309 *p = cpu_to_fs32(sb, result);
311 inode->i_blocks += count << uspi->s_nspfshift;
312 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
316 UFSD(("EXIT, result %u\n", result))
/*
 * Case: extend the existing partial allocation in place.
 */
323 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
326 inode->i_blocks += count << uspi->s_nspfshift;
327 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
330 UFSD(("EXIT, result %u\n", result))
/*
 * Case: cannot extend — allocate a new area and move the data. The optim
 * policy (OPTSPACE/OPTTIME) decides whether to request a full block or just
 * newcount fragments; case labels are missing from this dump.
 */
337 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
340 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
341 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
343 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
346 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
349 request = uspi->s_fpb;
350 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
351 (uspi->s_minfree - 2) / 100)
353 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
356 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
/* Relocate old data: re-target each old buffer to its new block number.
 * (The sb_bread NULL-check branch is partly missing from this dump.) */
358 for (i = 0; i < oldcount; i++) {
359 bh = sb_bread(sb, tmp + i);
362 clear_buffer_dirty(bh);
363 bh->b_blocknr = result + i;
364 mark_buffer_dirty (bh);
366 sync_dirty_buffer(bh);
371 printk(KERN_ERR "ufs_new_fragments: bread fail\n");
376 *p = cpu_to_fs32(sb, result);
378 inode->i_blocks += count << uspi->s_nspfshift;
379 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Return the surplus of an over-sized request, then free the old fragments. */
382 if (newcount < request)
383 ufs_free_fragments (inode, result + newcount, request - newcount);
384 ufs_free_fragments (inode, tmp, oldcount);
385 UFSD(("EXIT, result %u\n", result))
390 UFSD(("EXIT (FAILED)\n"))
/*
 * Try to extend an existing allocation at 'fragment' from 'oldcount' to
 * 'newcount' fragments, in place, within its block. Succeeds only when the
 * fragments immediately following the current run are free in the cg bitmap.
 * Returns the (unchanged) fragment number on success; failure paths (returns
 * 0, presumably) are among the lines missing from this dump.
 *
 * NOTE(review): return statements, lock handling and closing braces are not
 * visible here (numbering jumps 405->408, 413->415, 430->435, 448->453);
 * verify against upstream before editing.
 */
395 ufs_add_fragments (struct inode * inode, unsigned fragment,
396 unsigned oldcount, unsigned newcount, int * err)
398 struct super_block * sb;
399 struct ufs_sb_private_info * uspi;
400 struct ufs_super_block_first * usb1;
401 struct ufs_cg_private_info * ucpi;
402 struct ufs_cylinder_group * ucg;
403 unsigned cgno, fragno, fragoff, count, fragsize, i;
405 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
408 uspi = UFS_SB(sb)->s_uspi;
409 usb1 = ubh_get_usb_first (USPI_UBH);
410 count = newcount - oldcount;
/* Quick rejections: not enough free fragments in this cg, or the grown run
 * would cross a block boundary. */
412 cgno = ufs_dtog(fragment);
413 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
415 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
417 ucpi = ufs_load_cylinder (sb, cgno);
420 ucg = ubh_get_ucg (UCPI_UBH);
421 if (!ufs_cg_chkmagic(sb, ucg)) {
422 ufs_panic (sb, "ufs_add_fragments",
423 "internal error, bad magic number on cg %u", cgno);
427 fragno = ufs_dtogd (fragment);
428 fragoff = ufs_fragnum (fragno);
/* All fragments we want to grow into must currently be free. */
429 for (i = oldcount; i < newcount; i++)
430 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
/*
 * Block can be extended: stamp the cg, then measure how far the free run
 * continues so the frsum (free-run-size histogram) can be re-bucketed.
 */
435 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
436 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
437 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
439 fragsize = i - oldcount;
440 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
441 ufs_panic (sb, "ufs_add_fragments",
442 "internal error or corrupted bitmap on cg %u", cgno);
/* Move the run from bucket 'fragsize' to 'fragsize - count' (if nonzero). */
443 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
444 if (fragsize != count)
445 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* Claim the new fragments in the bitmap. */
446 for (i = oldcount; i < newcount; i++)
447 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
/* Quota charge; the failure branch's body is missing from this dump. */
448 if(DQUOT_ALLOC_BLOCK(inode, count)) {
453 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
454 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
455 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
457 ubh_mark_buffer_dirty (USPI_UBH);
458 ubh_mark_buffer_dirty (UCPI_UBH);
459 if (sb->s_flags & MS_SYNCHRONOUS) {
460 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
461 ubh_wait_on_buffer (UCPI_UBH);
465 UFSD(("EXIT, fragment %u\n", fragment))
/*
 * UFS_TEST_FREE_SPACE_CG: check whether cylinder group 'cgno' can satisfy the
 * request — either a whole free block (cs_nbfree) or a free fragment run of
 * size >= count (nonzero frsum bucket). Expects sb, cgno, ucg, count, k and
 * uspi in the expanding scope; the goto/branch targets it jumps to are among
 * the lines missing from this dump. (Comments cannot go inside the
 * backslash-continued body.)
 */
470 #define UFS_TEST_FREE_SPACE_CG \
471 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
472 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
474 for (k = count; k < uspi->s_fpb; k++) \
475 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * Allocate 'count' fragments near 'goal', searching cylinder groups in the
 * classic FFS order: (1) the preferred cg, (2) quadratic rehash over cgs,
 * (3) brute-force linear scan. Once a cg with space is found, either take a
 * whole block (count == fpb, or no suitable fragment run exists — freeing the
 * surplus fragments back) or carve the run out of the bitmap directly.
 * Returns the absolute fragment number, with failure reported via *err.
 *
 * NOTE(review): this dump omits many lines (oldcg initialization, cgno
 * stepping inside both search loops, goto labels, returns, braces); the
 * visible code is not runnable as-is — verify against upstream.
 */
478 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
479 unsigned goal, unsigned count, int * err)
481 struct super_block * sb;
482 struct ufs_sb_private_info * uspi;
483 struct ufs_super_block_first * usb1;
484 struct ufs_cg_private_info * ucpi;
485 struct ufs_cylinder_group * ucg;
486 unsigned oldcg, i, j, k, result, allocsize;
488 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
491 uspi = UFS_SB(sb)->s_uspi;
492 usb1 = ubh_get_usb_first(USPI_UBH);
/*
 * 1. searching on preferred cylinder group
 */
498 UFS_TEST_FREE_SPACE_CG
/*
 * 2. quadratic rehash: probe cgs at exponentially growing offsets.
 */
503 for (j = 1; j < uspi->s_ncg; j *= 2) {
505 if (cgno >= uspi->s_ncg)
507 UFS_TEST_FREE_SPACE_CG
/*
 * 3. brute force search
 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
 */
514 cgno = (oldcg + 1) % uspi->s_ncg;
515 for (j = 2; j < uspi->s_ncg; j++) {
517 if (cgno >= uspi->s_ncg)
519 UFS_TEST_FREE_SPACE_CG
522 UFSD(("EXIT (FAILED)\n"))
/* A suitable cg was found (label missing from dump): load and validate it. */
526 ucpi = ufs_load_cylinder (sb, cgno);
529 ucg = ubh_get_ucg (UCPI_UBH);
530 if (!ufs_cg_chkmagic(sb, ucg))
531 ufs_panic (sb, "ufs_alloc_fragments",
532 "internal error, bad magic number on cg %u", cgno);
533 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Exact whole-block request: delegate to the block allocator. */
535 if (count == uspi->s_fpb) {
536 result = ufs_alloccg_block (inode, ucpi, goal, err);
537 if (result == (unsigned)-1)
/* Find the smallest free-run bucket >= count that is nonempty. */
542 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
543 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* No fragment run big enough: take a whole block and give back the
 * (fpb - count) surplus fragments. */
546 if (allocsize == uspi->s_fpb) {
547 result = ufs_alloccg_block (inode, ucpi, goal, err);
548 if (result == (unsigned)-1)
550 goal = ufs_dtogd (result);
551 for (i = count; i < uspi->s_fpb; i++)
552 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
553 i = uspi->s_fpb - count;
554 DQUOT_FREE_BLOCK(inode, i);
556 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
557 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
558 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
559 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* A run of size 'allocsize' exists: locate it in the bitmap. */
563 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
564 if (result == (unsigned)-1)
/* Quota charge; the failure branch's body is missing from this dump. */
566 if(DQUOT_ALLOC_BLOCK(inode, count)) {
570 for (i = 0; i < count; i++)
571 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
573 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
574 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
575 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* Re-bucket the remainder of the run in the frsum histogram. */
576 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
578 if (count != allocsize)
579 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
582 ubh_mark_buffer_dirty (USPI_UBH);
583 ubh_mark_buffer_dirty (UCPI_UBH);
584 if (sb->s_flags & MS_SYNCHRONOUS) {
585 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
586 ubh_wait_on_buffer (UCPI_UBH);
/* Convert the cg-relative result to an absolute fragment number. */
590 result += cgno * uspi->s_fpg;
591 UFSD(("EXIT3, result %u\n", result))
/*
 * Allocate one whole block within the cylinder group 'ucpi', preferring
 * 'goal' (falling back to the cg's rotor when goal is 0 / unusable). Clears
 * the block in the free map, updates cluster and rotational accounting, and
 * returns the cg-relative fragment number of the block's first fragment.
 *
 * NOTE(review): sb assignment, the "goal hit" fast path, gotos/labels and
 * returns are missing from this dump (jumps 609->612, 621->627, 636->641);
 * verify against upstream before editing.
 */
595 static unsigned ufs_alloccg_block (struct inode * inode,
596 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
598 struct super_block * sb;
599 struct ufs_sb_private_info * uspi;
600 struct ufs_super_block_first * usb1;
601 struct ufs_cylinder_group * ucg;
602 unsigned result, cylno, blkno;
604 UFSD(("ENTER, goal %u\n", goal))
607 uspi = UFS_SB(sb)->s_uspi;
608 usb1 = ubh_get_usb_first(USPI_UBH);
609 ucg = ubh_get_ucg(UCPI_UBH);
/* No usable goal: resume from this cg's rotor position. */
612 goal = ucpi->c_rotor;
/* Round the goal down to a block boundary, expressed cg-relative. */
615 goal = ufs_blknum (goal);
616 goal = ufs_dtogd (goal);
/*
 * If the requested block is available, use it.
 */
621 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* Otherwise scan the bitmap for any free block-sized run. */
627 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
628 if (result == (unsigned)-1)
630 ucpi->c_rotor = result;
/* Claim the block and update the cluster map (4.4BSD variants only). */
632 blkno = ufs_fragstoblks(result);
633 ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
634 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
635 ufs_clusteracct (sb, ucpi, blkno, -1);
/* Quota charge; the failure branch's body is missing from this dump. */
636 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
641 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
642 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
643 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
/* Rotational-position bookkeeping for the now-allocated block. */
644 cylno = ufs_cbtocylno(result);
645 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
646 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
648 UFSD(("EXIT, result %u\n", result))
/*
 * Search the cg free-fragment bitmap for a free run of exactly 'count'
 * fragments, starting near 'goal' (or the cg's fragment rotor when goal is
 * 0 / unusable). ubh_scanc finds the candidate byte using the per-fpb
 * fragment lookup table; the bit loop then pins down the exact position.
 * Returns the cg-relative fragment number of the run, or (unsigned)-1-style
 * failure (the return statements are missing from this dump).
 *
 * NOTE(review): the goal/rotor branch heads, the wrap-around second scan's
 * length setup and several returns are not visible here (jumps 666->669,
 * 676->679, 700->706); verify against upstream.
 */
653 static unsigned ufs_bitmap_search (struct super_block * sb,
654 struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
656 struct ufs_sb_private_info * uspi;
657 struct ufs_super_block_first * usb1;
658 struct ufs_cylinder_group * ucg;
659 unsigned start, length, location, result;
660 unsigned possition, fragsize, blockmap, mask;
662 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
664 uspi = UFS_SB(sb)->s_uspi;
665 usb1 = ubh_get_usb_first (USPI_UBH);
666 ucg = ubh_get_ucg(UCPI_UBH);
/* Starting byte: from the goal, else from the cg's fragment rotor. */
669 start = ufs_dtogd(goal) >> 3;
671 start = ucpi->c_frotor >> 3;
/* First scan: from 'start' to the end of the bitmap. */
673 length = ((uspi->s_fpg + 7) >> 3) - start;
674 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
675 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
676 1 << (count - 1 + (uspi->s_fpb & 7)));
/* Second scan (wrap-around) from the beginning; its length setup is among
 * the missing lines. Nothing found at all => corrupted bitmap. */
679 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
680 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
681 1 << (count - 1 + (uspi->s_fpb & 7)));
683 ufs_error (sb, "ufs_bitmap_search",
684 "bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
685 ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
/* Convert the scanc result (bytes remaining) back to a bit index and
 * remember it as the new rotor. */
690 result = (start + length - location) << 3;
691 ucpi->c_frotor = result;
/*
 * found the byte in the map: walk its bits to locate the exact run of
 * 'count' free fragments ("possition" is the original's spelling).
 */
696 blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
698 for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) {
699 if (blockmap & mask) {
/* Run-length resets at block boundaries (fpbmask-aligned positions);
 * the counting lines themselves are missing from this dump. */
700 if (!(possition & uspi->s_fpbmask))
706 if (fragsize == count) {
707 result += possition - count;
708 UFSD(("EXIT, result %u\n", result))
/* Final check after the loop for a run ending at the byte boundary. */
714 if (fragsize == count) {
715 result += possition - count;
716 UFSD(("EXIT, result %u\n", result))
719 ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
720 UFSD(("EXIT (FAILED)\n"))
/*
 * Maintain the 4.4BSD cluster summary counts when block 'blkno' is freed
 * (cnt > 0, bit set) or allocated (cnt < 0, bit cleared). Measures the
 * contiguous free cluster extending forward and backward from blkno, capped
 * at s_contigsumsize, and adjusts the per-size cluster-sum array.
 *
 * NOTE(review): the cnt>0/cnt<0 branch heads, forw/back computations,
 * start initialization and returns are missing from this dump (jumps
 * 731->735, 746->752, 758->764); verify against upstream before editing.
 */
724 static void ufs_clusteracct(struct super_block * sb,
725 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
727 struct ufs_sb_private_info * uspi;
728 int i, start, end, forw, back;
730 uspi = UFS_SB(sb)->s_uspi;
/* Nothing to do when cluster summaries are disabled for this fs. */
731 if (uspi->s_contigsumsize <= 0)
/* Freeing sets the cluster bit; allocating clears it. */
735 ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
737 ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
/*
 * Find the size of the cluster going forward (next zero bit bounds it).
 */
743 end = start + uspi->s_contigsumsize;
744 if ( end >= ucpi->c_nclusterblks)
745 end = ucpi->c_nclusterblks;
746 i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
/*
 * Find the size of the cluster going backward.
 */
755 end = start - uspi->s_contigsumsize;
758 i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
/*
 * Account for old cluster and the possibly new forward and
 * backward clusters: i = back + forw + 1 (computed in missing lines).
 */
768 if (i > uspi->s_contigsumsize)
769 i = uspi->s_contigsumsize;
770 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
772 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
774 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Fragment lookup table for filesystems with 8 fragments per block: indexed
 * by a free-map byte, used by ufs_bitmap_search via ubh_scanc to detect a
 * byte containing a free run of the wanted size in one probe.
 * NOTE(review): the closing "};" is beyond this dump's view — do not edit
 * the table without the full source.
 */
778 static unsigned char ufs_fragtable_8fpb[] = {
779 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
780 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
781 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
782 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
783 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
784 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
785 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
786 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
787 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
788 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
789 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
790 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
791 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
792 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
793 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
794 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
/*
 * Fragment lookup table for filesystems with other-than-8 fragments per
 * block (1/2/4 fpb), companion to ufs_fragtable_8fpb and selected in
 * ufs_bitmap_search based on uspi->s_fpb.
 * NOTE(review): the closing "};" is beyond this dump's view — do not edit
 * the table without the full source.
 */
797 static unsigned char ufs_fragtable_other[] = {
798 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
799 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
800 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
801 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
802 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
803 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
804 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
805 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
806 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
807 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
808 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
809 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
810 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
811 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
812 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
813 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,