2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
/*
 * Debug tracing: UFSD() prints file/line/function and then a formatted
 * message via printk.  UFS_BALLOC_DEBUG is #undef'd here, so the
 * tracing variant is normally compiled out; the matching #else/#endif
 * branch is not visible in this excerpt.
 */
24 #undef UFS_BALLOC_DEBUG
26 #ifdef UFS_BALLOC_DEBUG
27 #define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
32 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
33 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
34 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
35 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
36 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
37 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
40 * Free 'count' fragments from fragment number 'fragment'
/*
 * ufs_free_fragments: return 'count' fragments, starting at fragment
 * number 'fragment', to the free bitmap of their cylinder group, and
 * update the per-cg, per-sb and summary free-fragment counters.  If
 * the release leaves the containing block entirely free, the block is
 * reassembled and the free-block counters are credited instead.
 *
 * NOTE(review): this excerpt is missing source lines (sb assignment,
 * locking, early returns and closing braces are not visible); the
 * comments below describe only the statements that are visible.
 */
42 void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
43 struct super_block * sb;
44 struct ufs_sb_private_info * uspi;
45 struct ufs_super_block_first * usb1;
46 struct ufs_cg_private_info * ucpi;
47 struct ufs_cylinder_group * ucg;
48 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
51 uspi = UFS_SB(sb)->s_uspi;
52 usb1 = ubh_get_usb_first(USPI_UBH);
54 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Sanity check: the run must not extend past the cylinder group. */
56 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
57 ufs_error (sb, "ufs_free_fragments", "internal error");
/* Locate the cylinder group and the bit offset within it. */
61 cgno = ufs_dtog(fragment);
62 bit = ufs_dtogd(fragment);
63 if (cgno >= uspi->s_ncg) {
64 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
68 ucpi = ufs_load_cylinder (sb, cgno);
71 ucg = ubh_get_ucg (UCPI_UBH);
72 if (!ufs_cg_chkmagic(sb, ucg)) {
73 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
/*
 * Withdraw the old fragment-size accounting for the containing block
 * (ufs_fragacct with -1) before flipping bits; it is re-added below.
 */
77 end_bit = bit + count;
78 bbase = ufs_blknum (bit);
79 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
80 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
/* Mark each fragment free; a bit already set means a double free. */
81 for (i = bit; i < end_bit; i++) {
82 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
83 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
84 else ufs_error (sb, "ufs_free_fragments",
85 "bit already cleared for fragment %u", i);
/* Credit the freed space back to the owner's disk quota. */
88 DQUOT_FREE_BLOCK (inode, count);
/* Bump free-fragment counters: cg, summary, and in-core cg array. */
91 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
92 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
93 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* Re-add fragment-size accounting with the updated bitmap. */
94 blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
95 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
98 * Trying to reassemble free fragments into block
/*
 * If the whole containing block is now free, convert the fragment
 * credits into one free block: undo s_fpb fragment credits and add
 * one to the free-block counters and cylinder summaries.
 */
100 blkno = ufs_fragstoblks (bbase);
101 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
102 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
103 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
104 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
/* 44BSD layouts also track contiguous free clusters. */
105 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
106 ufs_clusteracct (sb, ucpi, blkno, 1);
107 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
108 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
109 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
110 cylno = ufs_cbtocylno (bbase);
111 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
112 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Dirty the superblock and cg buffers; write through when the fs
 * is mounted synchronous. */
115 ubh_mark_buffer_dirty (USPI_UBH);
116 ubh_mark_buffer_dirty (UCPI_UBH);
117 if (sb->s_flags & MS_SYNCHRONOUS) {
118 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
119 ubh_wait_on_buffer (UCPI_UBH);
129 UFSD(("EXIT (FAILED)\n"))
134 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
/*
 * ufs_free_blocks: free 'count' fragments starting at 'fragment',
 * where both must be whole-block aligned (multiples of s_fpb); the
 * run is released one block at a time.  A run that crosses a cylinder
 * group boundary is split via 'overflow' (the continuation path is in
 * lines not visible in this excerpt).
 *
 * NOTE(review): this excerpt is missing source lines (sb assignment,
 * locking, loop/branch closers); comments describe only what is
 * visible.
 */
136 void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
137 struct super_block * sb;
138 struct ufs_sb_private_info * uspi;
139 struct ufs_super_block_first * usb1;
140 struct ufs_cg_private_info * ucpi;
141 struct ufs_cylinder_group * ucg;
142 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
145 uspi = UFS_SB(sb)->s_uspi;
146 usb1 = ubh_get_usb_first(USPI_UBH);
148 UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
/* Both start and length must be block aligned. */
150 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
151 ufs_error (sb, "ufs_free_blocks", "internal error, "
152 "fragment %u, count %u\n", fragment, count);
/* Locate cylinder group and bit offset of the first block. */
160 cgno = ufs_dtog (fragment);
161 bit = ufs_dtogd (fragment);
162 if (cgno >= uspi->s_ncg) {
163 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
/* Clamp the run to this cg; 'overflow' carries the remainder. */
166 end_bit = bit + count;
167 if (end_bit > uspi->s_fpg) {
168 overflow = bit + count - uspi->s_fpg;
173 ucpi = ufs_load_cylinder (sb, cgno);
176 ucg = ubh_get_ucg (UCPI_UBH);
177 if (!ufs_cg_chkmagic(sb, ucg)) {
178 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* Free each block in the run; a block already marked free is a
 * double free and is reported. */
182 for (i = bit; i < end_bit; i += uspi->s_fpb) {
183 blkno = ufs_fragstoblks(i);
184 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
185 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
187 ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
/* 44BSD layouts also track contiguous free clusters. */
188 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
189 ufs_clusteracct (sb, ucpi, blkno, 1);
/* Credit one block's worth of fragments to the quota. */
190 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
/* Bump free-block counters: cg, summary, in-core cg array, and
 * the per-cylinder position tables. */
192 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
193 fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
194 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
195 cylno = ufs_cbtocylno(i);
196 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
197 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Dirty the superblock and cg buffers; write through when the fs
 * is mounted synchronous. */
200 ubh_mark_buffer_dirty (USPI_UBH);
201 ubh_mark_buffer_dirty (UCPI_UBH);
202 if (sb->s_flags & MS_SYNCHRONOUS) {
203 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
204 ubh_wait_on_buffer (UCPI_UBH);
220 UFSD(("EXIT (FAILED)\n"))
/*
 * NULLIFY_FRAGMENTS: zero the newly allocated fragments
 * [oldcount, newcount) of block 'result' through the buffer cache,
 * marking each buffer uptodate and dirty, and writing synchronously
 * when the inode is IS_SYNC.  Relies on sb, bh, result, oldcount,
 * newcount and inode being in scope at the expansion site; the tail
 * of the macro (buffer release) is not visible in this excerpt.
 */
226 #define NULLIFY_FRAGMENTS \
227 for (i = oldcount; i < newcount; i++) { \
228 bh = sb_getblk(sb, result + i); \
229 memset (bh->b_data, 0, sb->s_blocksize); \
230 set_buffer_uptodate(bh); \
231 mark_buffer_dirty (bh); \
232 if (IS_SYNC(inode)) \
233 sync_dirty_buffer(bh); \
/*
 * ufs_new_fragments: main allocation entry point.  Allocate 'count'
 * fragments for 'inode' at logical fragment 'fragment', preferring
 * physical fragment 'goal'.  '*p' is the on-disk block pointer being
 * filled (updated in byte-swapped form on success); '*err' receives
 * an error code on failure.  Three visible strategies:
 *   1) *p is empty  -> allocate a fresh run (ufs_alloc_fragments);
 *   2) *p is set    -> try to extend it in place (ufs_add_fragments);
 *   3) otherwise    -> allocate a new (larger) run, copy the old
 *      buffers over, and free the old fragments.
 *
 * NOTE(review): this excerpt is missing source lines (sb assignment,
 * locking, returns, brelse calls, switch case labels); comments
 * describe only what is visible.
 */
237 unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
238 unsigned goal, unsigned count, int * err )
240 struct super_block * sb;
241 struct ufs_sb_private_info * uspi;
242 struct ufs_super_block_first * usb1;
243 struct buffer_head * bh;
244 unsigned cgno, oldcount, newcount, tmp, request, i, result;
246 UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
249 uspi = UFS_SB(sb)->s_uspi;
250 usb1 = ubh_get_usb_first(USPI_UBH);
/* Current on-disk pointer value, CPU-endian. */
255 tmp = fs32_to_cpu(sb, *p);
/* Clamp an over-long request to the end of the block. */
256 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
257 ufs_warning (sb, "ufs_new_fragments", "internal warning"
258 " fragment %u, count %u", fragment, count);
259 count = uspi->s_fpb - ufs_fragnum(fragment);
261 oldcount = ufs_fragnum (fragment);
262 newcount = oldcount + count;
265 * Somebody else has just allocated our fragments
269 ufs_error (sb, "ufs_new_fragments", "internal error, "
270 "fragment %u, tmp %u\n", fragment, tmp);
/* Fragment lies before i_lastfrag: treat as already allocated. */
274 if (fragment < UFS_I(inode)->i_lastfrag) {
275 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
282 UFSD(("EXIT (ALREADY ALLOCATED)\n"))
289 * There is not enough space for user on the device
/* Reserve the minfree tail of the fs for privileged users only. */
291 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
293 UFSD(("EXIT (FAILED)\n"))
/* Pick a starting cylinder group: from the goal if valid, else
 * from the inode's own cg. */
297 if (goal >= uspi->s_size)
300 cgno = ufs_inotocg (inode->i_ino);
302 cgno = ufs_dtog (goal);
305 * allocate new fragment
/* Case 1: pointer empty -- allocate a fresh fragment run. */
308 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
310 *p = cpu_to_fs32(sb, result);
/* i_blocks counts 512-byte sectors: fragments << s_nspfshift. */
312 inode->i_blocks += count << uspi->s_nspfshift;
313 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
317 UFSD(("EXIT, result %u\n", result))
/* Case 2: try to grow the existing run in place. */
324 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
327 inode->i_blocks += count << uspi->s_nspfshift;
328 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
331 UFSD(("EXIT, result %u\n", result))
336 * allocate new block and move data
/* Case 3: decide the request size from the fs optimization policy
 * (fs_optim), switching between space- and time-optimized modes
 * based on how much free space remains. */
338 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
341 if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
342 > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
344 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
347 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
350 request = uspi->s_fpb;
351 if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
352 (uspi->s_minfree - 2) / 100)
354 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
357 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
/* Relocate: repoint each old buffer at the new location and
 * rewrite it (synchronously if required). */
359 for (i = 0; i < oldcount; i++) {
360 bh = sb_bread(sb, tmp + i);
363 clear_buffer_dirty(bh);
364 bh->b_blocknr = result + i;
365 mark_buffer_dirty (bh);
367 sync_dirty_buffer(bh);
372 printk(KERN_ERR "ufs_new_fragments: bread fail\n");
377 *p = cpu_to_fs32(sb, result);
379 inode->i_blocks += count << uspi->s_nspfshift;
380 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Return the unused tail of the request, then the old run. */
383 if (newcount < request)
384 ufs_free_fragments (inode, result + newcount, request - newcount);
385 ufs_free_fragments (inode, tmp, oldcount);
386 UFSD(("EXIT, result %u\n", result))
391 UFSD(("EXIT (FAILED)\n"))
/*
 * ufs_add_fragments: try to extend an existing run of fragments at
 * 'fragment' from 'oldcount' to 'newcount' fragments in place, i.e.
 * without moving data.  Requires the (newcount - oldcount) fragments
 * immediately following the run to be free in the same block.
 * Returns the (unchanged) fragment number on success; the failure
 * return paths are not visible in this excerpt.  '*err' receives a
 * quota error when DQUOT_ALLOC_BLOCK fails.
 *
 * NOTE(review): this excerpt is missing source lines (return type
 * specifier line, sb assignment, returns and closers); comments
 * describe only what is visible.
 */
396 ufs_add_fragments (struct inode * inode, unsigned fragment,
397 unsigned oldcount, unsigned newcount, int * err)
399 struct super_block * sb;
400 struct ufs_sb_private_info * uspi;
401 struct ufs_super_block_first * usb1;
402 struct ufs_cg_private_info * ucpi;
403 struct ufs_cylinder_group * ucg;
404 unsigned cgno, fragno, fragoff, count, fragsize, i;
406 UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))
409 uspi = UFS_SB(sb)->s_uspi;
410 usb1 = ubh_get_usb_first (USPI_UBH);
/* How many extra fragments are being requested. */
411 count = newcount - oldcount;
/* Bail out early if the cg has too few free fragments, or the
 * extension would cross a block boundary. */
413 cgno = ufs_dtog(fragment);
414 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
416 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
418 ucpi = ufs_load_cylinder (sb, cgno);
421 ucg = ubh_get_ucg (UCPI_UBH);
422 if (!ufs_cg_chkmagic(sb, ucg)) {
423 ufs_panic (sb, "ufs_add_fragments",
424 "internal error, bad magic number on cg %u", cgno);
/* Every fragment in [oldcount, newcount) after the run must be
 * free for in-place extension. */
428 fragno = ufs_dtogd (fragment);
429 fragoff = ufs_fragnum (fragno);
430 for (i = oldcount; i < newcount; i++)
431 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
434 * Block can be extended
436 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Measure the free run following the old fragments to find which
 * frsum bucket it currently occupies. */
437 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
438 if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
440 fragsize = i - oldcount;
441 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
442 ufs_panic (sb, "ufs_add_fragments",
443 "internal error or corrupted bitmap on cg %u", cgno);
/* Move the run from its old frsum bucket to the smaller one that
 * remains after taking 'count' fragments. */
444 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
445 if (fragsize != count)
446 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* Claim the fragments in the bitmap (clear = allocated). */
447 for (i = oldcount; i < newcount; i++)
448 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
/* Charge the extension against the owner's quota. */
449 if(DQUOT_ALLOC_BLOCK(inode, count)) {
/* Debit free-fragment counters: cg, in-core cg array, summary. */
454 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
455 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
456 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
458 ubh_mark_buffer_dirty (USPI_UBH);
459 ubh_mark_buffer_dirty (UCPI_UBH);
460 if (sb->s_flags & MS_SYNCHRONOUS) {
461 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
462 ubh_wait_on_buffer (UCPI_UBH);
466 UFSD(("EXIT, fragment %u\n", fragment))
/*
 * UFS_TEST_FREE_SPACE_CG: test whether cylinder group 'cgno' can
 * satisfy the current request -- either it has a whole free block
 * (cs_nbfree), or its frsum table shows a free fragment run of at
 * least 'count' fragments.  Uses sb, uspi, ucg, cgno, count and k
 * from the expansion site; the success branch targets are in lines
 * not visible in this excerpt.
 */
471 #define UFS_TEST_FREE_SPACE_CG \
472 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
473 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
475 for (k = count; k < uspi->s_fpb; k++) \
476 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * ufs_alloc_fragments: allocate 'count' fragments near 'goal',
 * starting the search at cylinder group 'cgno'.  Search order:
 *   1. the preferred cylinder group;
 *   2. quadratic rehash over cg numbers;
 *   3. brute-force linear scan of the remaining groups.
 * Once a cg is found: a full-block request goes to ufs_alloccg_block;
 * otherwise the smallest adequate fragment run is taken via
 * ufs_bitmap_search, with frsum buckets adjusted for any remainder.
 * Returns the fragment number relative to the device (cg base added
 * at the end); failure paths return through lines not visible here.
 *
 * NOTE(review): this excerpt is missing source lines (sb assignment,
 * oldcg capture, goto labels, returns and closers); comments describe
 * only what is visible.
 */
479 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
480 unsigned goal, unsigned count, int * err)
482 struct super_block * sb;
483 struct ufs_sb_private_info * uspi;
484 struct ufs_super_block_first * usb1;
485 struct ufs_cg_private_info * ucpi;
486 struct ufs_cylinder_group * ucg;
487 unsigned oldcg, i, j, k, result, allocsize;
489 UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))
492 uspi = UFS_SB(sb)->s_uspi;
493 usb1 = ubh_get_usb_first(USPI_UBH);
497 * 1. searching on preferred cylinder group
499 UFS_TEST_FREE_SPACE_CG
502 * 2. quadratic rehash
504 for (j = 1; j < uspi->s_ncg; j *= 2) {
506 if (cgno >= uspi->s_ncg)
508 UFS_TEST_FREE_SPACE_CG
512 * 3. brute force search
513 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
515 cgno = (oldcg + 1) % uspi->s_ncg;
516 for (j = 2; j < uspi->s_ncg; j++) {
518 if (cgno >= uspi->s_ncg)
520 UFS_TEST_FREE_SPACE_CG
523 UFSD(("EXIT (FAILED)\n"))
/* A suitable cg was found: load it and stamp its mtime. */
527 ucpi = ufs_load_cylinder (sb, cgno);
530 ucg = ubh_get_ucg (UCPI_UBH);
531 if (!ufs_cg_chkmagic(sb, ucg))
532 ufs_panic (sb, "ufs_alloc_fragments",
533 "internal error, bad magic number on cg %u", cgno);
534 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Whole-block request: delegate to the block allocator. */
536 if (count == uspi->s_fpb) {
537 result = ufs_alloccg_block (inode, ucpi, goal, err);
538 if (result == (unsigned)-1)
/* Find the smallest free-run size >= count with a nonzero frsum
 * bucket. */
543 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
544 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* No partial run big enough: break a whole block, keep 'count'
 * fragments and return the tail (i fragments) to the free pool. */
547 if (allocsize == uspi->s_fpb) {
548 result = ufs_alloccg_block (inode, ucpi, goal, err);
549 if (result == (unsigned)-1)
551 goal = ufs_dtogd (result);
552 for (i = count; i < uspi->s_fpb; i++)
553 ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
554 i = uspi->s_fpb - count;
/* Quota was charged for the full block; refund the tail. */
555 DQUOT_FREE_BLOCK(inode, i);
557 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
558 fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
559 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
560 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* Partial run available: locate it in the bitmap and claim it. */
564 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
565 if (result == (unsigned)-1)
567 if(DQUOT_ALLOC_BLOCK(inode, count)) {
571 for (i = 0; i < count; i++)
572 ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);
/* Debit free counters and move the run's frsum bucket, crediting
 * the leftover (allocsize - count) bucket if any. */
574 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
575 fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
576 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
577 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
579 if (count != allocsize)
580 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
583 ubh_mark_buffer_dirty (USPI_UBH);
584 ubh_mark_buffer_dirty (UCPI_UBH);
585 if (sb->s_flags & MS_SYNCHRONOUS) {
586 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
587 ubh_wait_on_buffer (UCPI_UBH);
/* Convert the cg-relative fragment number to a device-relative one. */
591 result += cgno * uspi->s_fpg;
592 UFSD(("EXIT3, result %u\n", result))
/*
 * ufs_alloccg_block: allocate one whole block in the cylinder group
 * described by 'ucpi', preferring 'goal' (falling back to the cg's
 * rotor when no goal is given).  Clears the block in the free bitmap,
 * charges quota, and debits the free-block counters and cylinder
 * summaries.  Returns the cg-relative fragment number of the block.
 *
 * NOTE(review): this excerpt is missing source lines (sb assignment,
 * goal-hit shortcut, returns and closers); comments describe only
 * what is visible.
 */
596 static unsigned ufs_alloccg_block (struct inode * inode,
597 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
599 struct super_block * sb;
600 struct ufs_sb_private_info * uspi;
601 struct ufs_super_block_first * usb1;
602 struct ufs_cylinder_group * ucg;
603 unsigned result, cylno, blkno;
605 UFSD(("ENTER, goal %u\n", goal))
608 uspi = UFS_SB(sb)->s_uspi;
609 usb1 = ubh_get_usb_first(USPI_UBH);
610 ucg = ubh_get_ucg(UCPI_UBH);
/* No goal: continue from this cg's last allocation point. */
613 goal = ucpi->c_rotor;
/* Round the goal to a block boundary, cg-relative. */
616 goal = ufs_blknum (goal);
617 goal = ufs_dtogd (goal);
620 * If the requested block is available, use it.
622 if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* Otherwise scan the bitmap for any fully free block. */
628 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
629 if (result == (unsigned)-1)
/* Remember where we allocated, for the next search. */
631 ucpi->c_rotor = result;
/* Claim the block in the bitmap (clear = allocated). */
633 blkno = ufs_fragstoblks(result);
634 ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
/* 44BSD layouts also track contiguous free clusters. */
635 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
636 ufs_clusteracct (sb, ucpi, blkno, -1);
/* Charge a full block's fragments against the owner's quota. */
637 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
/* Debit free-block counters and per-cylinder position tables. */
642 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
643 fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
644 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
645 cylno = ufs_cbtocylno(result);
646 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
647 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
649 UFSD(("EXIT, result %u\n", result))
/*
 * ufs_bitmap_search: find a run of 'count' free fragments in the
 * free bitmap of cylinder group 'ucpi', searching forward from
 * 'goal' (or from the cg's fragment rotor when no goal is given).
 * The byte-wise scan uses a precomputed fragment table
 * (ufs_fragtable_8fpb for 8 fragments per block, ufs_fragtable_other
 * otherwise) to spot candidate bytes, then examines the byte bit by
 * bit.  Returns the cg-relative fragment number of the run's start,
 * or falls through to the corruption/failure path.
 *
 * NOTE(review): this excerpt is missing source lines (wrap-around
 * branch, returns, inner fragsize updates); comments describe only
 * what is visible.  'possition' is a historical misspelling kept
 * as-is.
 */
654 static unsigned ufs_bitmap_search (struct super_block * sb,
655 struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
657 struct ufs_sb_private_info * uspi;
658 struct ufs_super_block_first * usb1;
659 struct ufs_cylinder_group * ucg;
660 unsigned start, length, location, result;
661 unsigned possition, fragsize, blockmap, mask;
663 UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
665 uspi = UFS_SB(sb)->s_uspi;
666 usb1 = ubh_get_usb_first (USPI_UBH);
667 ucg = ubh_get_ucg(UCPI_UBH);
/* Starting byte of the scan: goal's byte, or the frotor's. */
670 start = ufs_dtogd(goal) >> 3;
672 start = ucpi->c_frotor >> 3;
/* First pass: scan from 'start' to the end of the bitmap. */
674 length = ((uspi->s_fpg + 7) >> 3) - start;
675 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
676 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
677 1 << (count - 1 + (uspi->s_fpb & 7)));
/* Second pass (wrap-around): scan from the bitmap's beginning. */
680 location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
681 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
682 1 << (count - 1 + (uspi->s_fpb & 7)));
684 ufs_error (sb, "ufs_bitmap_search",
685 "bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
686 ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
/* Convert the byte location found by ubh_scanc back to a fragment
 * number and remember it in the rotor. */
691 result = (start + length - location) << 3;
692 ucpi->c_frotor = result;
695 * found the byte in the map
/* Walk the candidate byte bit by bit, tracking the current free-run
 * length in 'fragsize' until a run of 'count' is confirmed. */
697 blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
699 for (possition = 0, mask = 1; possition < 8; possition++, mask <<= 1) {
700 if (blockmap & mask) {
701 if (!(possition & uspi->s_fpbmask))
707 if (fragsize == count) {
708 result += possition - count;
709 UFSD(("EXIT, result %u\n", result))
/* Final check after the loop for a run ending at the byte's end. */
715 if (fragsize == count) {
716 result += possition - count;
717 UFSD(("EXIT, result %u\n", result))
720 ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
721 UFSD(("EXIT (FAILED)\n"))
/*
 * ufs_clusteracct: maintain the 44BSD contiguous-cluster summary for
 * cylinder group 'ucpi' after block 'blkno' is freed (cnt > 0) or
 * allocated (cnt < 0).  Sets/clears the block's bit in the cluster
 * map, measures the free cluster extending forward and backward from
 * the block (capped at s_contigsumsize), and adjusts the per-size
 * cluster summary counters accordingly.
 *
 * NOTE(review): this excerpt is missing source lines (the cnt-sign
 * branches, start/forw/back computations, and clamps); comments
 * describe only what is visible.
 */
725 static void ufs_clusteracct(struct super_block * sb,
726 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
728 struct ufs_sb_private_info * uspi;
729 int i, start, end, forw, back;
731 uspi = UFS_SB(sb)->s_uspi;
/* Cluster accounting is disabled when contigsumsize is not set. */
732 if (uspi->s_contigsumsize <= 0)
/* Freeing sets the cluster-map bit; allocating clears it. */
736 ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
738 ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
741 * Find the size of the cluster going forward.
744 end = start + uspi->s_contigsumsize;
745 if ( end >= ucpi->c_nclusterblks)
746 end = ucpi->c_nclusterblks;
747 i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
753 * Find the size of the cluster going backward.
756 end = start - uspi->s_contigsumsize;
759 i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
765 * Account for old cluster and the possibly new forward and
/* Each summary slot is a 32-bit counter, hence the << 2 offsets:
 * credit the combined cluster size, debit the old back and forward
 * cluster sizes it replaced. */
769 if (i > uspi->s_contigsumsize)
770 i = uspi->s_contigsumsize;
771 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
773 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
775 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * ufs_fragtable_8fpb: 256-entry lookup table indexed by one byte of
 * the free-fragment bitmap, used by ubh_scanc in ufs_bitmap_search
 * when the filesystem has 8 fragments per block.  Each entry encodes
 * which free-run lengths occur in that byte, so a single AND against
 * the search mask tells whether the byte can hold the requested run.
 * The closing brace of this initializer lies in a stripped portion
 * of this excerpt.
 */
779 static unsigned char ufs_fragtable_8fpb[] = {
780 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
781 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
782 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
783 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
784 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
785 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
786 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
787 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
788 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
789 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
790 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
791 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
792 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
793 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
794 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
795 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
798 static unsigned char ufs_fragtable_other[] = {
799 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
800 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
801 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
802 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
803 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
804 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
805 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
806 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
807 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
808 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
809 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
810 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
811 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
812 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
813 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
814 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,