/*
 *  linux/fs/ufs/balloc.c
 *
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */
#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"
#undef UFS_BALLOC_DEBUG

#ifdef UFS_BALLOC_DEBUG
#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
#else
#define UFSD(x)
#endif
static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super (sb);
	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}
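
	/*
	 * Set the freed fragments' bits in the cg free map.  ufs_fragacct()
	 * is called with -1 before the bits change and with +1 afterwards so
	 * that the per-size fragment summary (cg_frsum) of the surrounding
	 * block stays consistent.
	 */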
	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (inode, count);

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_add(sb, &usb1->fs_cstotal.cs_nffree, count);
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH, ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, uspi->s_fpb);
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno (bbase);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
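
	/*
	 * Mark the superblock and cylinder-group buffers dirty; on a
	 * synchronous mount the cylinder-group buffers are written out
	 * immediately and waited on.
	 */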
	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD(("ENTER, fragment %u, count %u\n", fragment, count))

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			"fragment %u, count %u\n", fragment, count);
		return;
	}

	lock_super (sb);

do_more:
	overflow = 0;
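
	/*
	 * The range may span two cylinder groups; everything past the end
	 * of the current group is remembered in 'overflow' and freed in a
	 * second pass through do_more.
	 */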
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH, ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		fs32_add(sb, &usb1->fs_cstotal.cs_nbfree, 1);
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno(i);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD(("EXIT\n"))
	return;

failed:
	unlock_super (sb);
	UFSD(("EXIT (FAILED)\n"))
	return;
}
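
/*
 * NULLIFY_FRAGMENTS zeroes the newly allocated fragments (oldcount ..
 * newcount-1 past 'result') through the buffer cache so that stale disk
 * contents never become visible in the file; the buffers are written
 * synchronously when the inode requires it.
 */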

#define NULLIFY_FRAGMENTS \
	for (i = oldcount; i < newcount; i++) { \
		bh = sb_getblk(sb, result + i); \
		memset (bh->b_data, 0, sb->s_blocksize); \
		set_buffer_uptodate(bh); \
		mark_buffer_dirty (bh); \
		if (IS_SYNC(inode)) \
			sync_dirty_buffer(bh); \
		brelse (bh); \
	}

unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
	unsigned goal, unsigned count, int * err )
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct buffer_head * bh;
	unsigned cgno, oldcount, newcount, tmp, request, i, result;

	UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);

	tmp = fs32_to_cpu(sb, *p);
	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning (sb, "ufs_new_fragments", "internal warning"
			" fragment %u, count %u", fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;
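
	/*
	 * oldcount is the number of fragments already allocated at
	 * 'fragment' within its block, newcount the size the tail of the
	 * block should grow to.
	 */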

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error (sb, "ufs_new_fragments", "internal error, "
				"fragment %u, tmp %u\n", fragment, tmp);
			unlock_super (sb);
			return (unsigned)-1;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD(("EXIT (ALREADY ALLOCATED)\n"))
			unlock_super (sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(usb1, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD(("EXIT (FAILED)\n"))
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog (goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			*p = cpu_to_fs32(sb, result);
			*err = 0;
			inode->i_blocks += count << uspi->s_nspfshift;
			UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
			NULLIFY_FRAGMENTS
		}
		unlock_super(sb);
		UFSD(("EXIT, result %u\n", result))
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		inode->i_blocks += count << uspi->s_nspfshift;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		NULLIFY_FRAGMENTS
		unlock_super(sb);
		UFSD(("EXIT, result %u\n", result))
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree)
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100) )
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (fs32_to_cpu(sb, usb1->fs_cstotal.cs_nffree) < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
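	/*
	 * 'request' is now either just the fragments actually needed
	 * (space optimization) or a whole block (time optimization),
	 * depending on fs_optim and how much free space is left.
	 */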
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
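		/*
		 * Data already written into the old fragments is carried
		 * over by re-pointing each buffer at its new disk location
		 * (b_blocknr) and marking it dirty; the old fragments are
		 * freed once everything has been moved.
		 */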
		for (i = 0; i < oldcount; i++) {
			bh = sb_bread(sb, tmp + i);
			if (bh) {
				clear_buffer_dirty(bh);
				bh->b_blocknr = result + i;
				mark_buffer_dirty (bh);
				if (IS_SYNC(inode))
					sync_dirty_buffer(bh);
				brelse (bh);
			} else {
				printk(KERN_ERR "ufs_new_fragments: bread fail\n");
				unlock_super(sb);
				return 0;
			}
		}
		*p = cpu_to_fs32(sb, result);
		*err = 0;
		inode->i_blocks += count << uspi->s_nspfshift;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		NULLIFY_FRAGMENTS
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD(("EXIT, result %u\n", result))
		return result;
	}

	unlock_super(sb);
	UFSD(("EXIT (FAILED)\n"))
	return 0;
}

static unsigned
ufs_add_fragments (struct inode * inode, unsigned fragment,
		   unsigned oldcount, unsigned newcount, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD(("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount))

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;

	cgno = ufs_dtog(fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}
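
	/*
	 * The block can only be extended in place if the fragments right
	 * after the currently allocated ones (oldcount .. newcount-1 within
	 * the block) are still free; the loop below bails out as soon as
	 * one of them is already in use.
	 */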
	fragno = ufs_dtogd (fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, fragno + i);
	if (DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);

	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	UFSD(("EXIT, fragment %u\n", fragment))

	return fragment;
}
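
/*
 * UFS_TEST_FREE_SPACE_CG jumps to cg_found when the candidate cylinder
 * group 'cgno' has either a whole free block or, according to the
 * cg_frsum[] summary, a free fragment run of at least 'count' fragments.
 */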
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
	unsigned goal, unsigned count, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, result, allocsize;

	UFSD(("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count))

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD(("EXIT (FAILED)\n"))
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH);
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
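
	/*
	 * Three cases from here on: the request is a whole block, which is
	 * handed to ufs_alloccg_block(); cg_frsum[] shows a free run of at
	 * least 'count' fragments, which ufs_bitmap_search() will locate;
	 * or no such run exists, so a full block is taken and the unused
	 * tail fragments are released again.
	 */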
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goal = ufs_dtogd (result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH, ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		fs32_add(sb, &usb1->fs_cstotal.cs_nffree, i);
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == (unsigned)-1)
		return 0;
	if (DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH, ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH);
	ubh_mark_buffer_dirty (UCPI_UBH);
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);
	}
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;
	UFSD(("EXIT3, result %u\n", result))
	return result;
}

static unsigned ufs_alloccg_block (struct inode * inode,
	struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	unsigned result, cylno, blkno;

	UFSD(("ENTER, goal %u\n", goal))

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH);

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd (goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH, ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == (unsigned)-1)
		return (unsigned)-1;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH, ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	if (DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
		*err = -EDQUOT;
		return (unsigned)-1;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	fs32_sub(sb, &usb1->fs_cstotal.cs_nbfree, 1);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
	cylno = ufs_cbtocylno(result);
	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);

	UFSD(("EXIT, result %u\n", result))

	return result;
}
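
/*
 * Scan the free-fragment bitmap of one cylinder group for a run of 'count'
 * free fragments.  The map is searched byte-wise with ubh_scanc(), starting
 * at 'goal' (or at the stored rotor) and wrapping around once, and the exact
 * bit position inside the matching byte is then located by hand.  Returns
 * the fragment number within the cylinder group, or (unsigned)-1 on failure.
 */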

static unsigned ufs_bitmap_search (struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned goal, unsigned count)
{
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	unsigned start, length, location, result;
	unsigned position, fragsize, blockmap, mask;

	UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))

	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH);

	if (goal)
		start = ufs_dtogd(goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (location == 0) {
		length = start + 1;
		location = ubh_scanc(UCPI_UBH, ucpi->c_freeoff, length,
			(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
			1 << (count - 1 + (uspi->s_fpb & 7)));
		if (location == 0) {
			ufs_error (sb, "ufs_bitmap_search",
				"bitmap corrupted on cg %u, start %u, length %u, count %u, freeoff %u\n",
				ucpi->c_cgx, start, length, count, ucpi->c_freeoff);
			return (unsigned)-1;
		}
		start = 0;
	}
	result = (start + length - location) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
	blockmap = ubh_blkmap(UCPI_UBH, ucpi->c_freeoff, result);
	fragsize = 0;
	for (position = 0, mask = 1; position < 8; position++, mask <<= 1) {
		if (blockmap & mask) {
			if (!(position & uspi->s_fpbmask))
				fragsize = 1;
			else
				fragsize++;
		} else {
			if (fragsize == count) {
				result += position - count;
				UFSD(("EXIT, result %u\n", result))
				return result;
			}
			fragsize = 0;
		}
	}
	if (fragsize == count) {
		result += position - count;
		UFSD(("EXIT, result %u\n", result))
		return result;
	}
	ufs_error (sb, "ufs_bitmap_search", "block not in map on cg %u\n", ucpi->c_cgx);
	UFSD(("EXIT (FAILED)\n"))
	return (unsigned)-1;
}

static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH, ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH, ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH, ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH, ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH, ucpi->c_clustersumoff + (forw << 2)), cnt);
}
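
/*
 * The fragment tables map a byte of the free-fragment bitmap to a bit mask
 * describing which lengths of free-fragment runs occur in that byte;
 * ufs_bitmap_search() feeds them to ubh_scanc() to locate a byte containing
 * a long enough run.  ufs_fragtable_8fpb is used when a block holds exactly
 * eight fragments, ufs_fragtable_other for the smaller fragments-per-block
 * sizes.
 */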

static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};