/*
 * linux/fs/ufs/balloc.c
 *
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */
#include <linux/ufs_fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"
static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);
	if (ufs_fragnum(fragment) + count > uspi->s_fpb)
		ufs_error (sb, "ufs_free_fragments", "internal error");
	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
	ucpi = ufs_load_cylinder (sb, cgno);

	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
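	/*
	 * The ufs_fragacct(..., -1) call above removes this block's old
	 * free-run counts from cg_frsum; once the freed bits are set below,
	 * a matching ufs_fragacct(..., +1) pass re-adds the counts for the
	 * updated map, keeping the per-size free-fragment summary consistent.
	 */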
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}
	DQUOT_FREE_BLOCK (inode, count);

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
	/* Try to reassemble the freed fragments into a whole free block */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
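		/*
		 * The whole block is now free: move the accounting from the
		 * fragment counters (cs_nffree) to the block counters
		 * (cs_nbfree) and credit the per-cylinder summaries.
		 */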
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno (bbase);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	UFSD("EXIT (FAILED)\n");
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %u, count %u\n", fragment, count);
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}
	ucpi = ufs_load_cylinder (sb, cgno);

	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno(i);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	UFSD("EXIT (FAILED)\n");
/*
 * Modify the inode page cache in the following way:
 * blocks that currently have b_blocknr equal to oldb...oldb+count-1
 * get b_blocknr equal to newb...newb+count-1 instead;
 * we also assume that blocks oldb...oldb+count-1 are situated
 * at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of these functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
			       unsigned int count, unsigned int oldb,
			       unsigned int newb, struct page *locked_page)
	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, cur_index = locked_page->index;
	unsigned int i, j;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
	     inode->i_ino, count, oldb, newb);

	BUG_ON(!PageLocked(locked_page));
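	/*
	 * Walk the affected range one page-cache page at a time;
	 * blk_per_page is the number of file-system blocks that fit in a page.
	 */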
	for (i = 0; i < count; i += blk_per_page) {
		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page || IS_ERR(page)) /* it was truncated or EIO */
				continue;
		} else
			page = locked_page;

		j = i;
		head = page_buffers(page);
		bh = head;
		do {
			if (likely(bh->b_blocknr == j + oldb && j < count)) {
				unmap_underlying_metadata(bh->b_bdev,
							  bh->b_blocknr);
				bh->b_blocknr = newb + j++;
				mark_buffer_dirty(bh);
			}
			bh = bh->b_this_page;
		} while (bh != head);

		set_page_dirty(page);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
			   unsigned goal, unsigned count, int * err, struct page *locked_page)
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount, tmp, request, result;
	UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);

	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	tmp = fs32_to_cpu(sb, *p);
	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning (sb, "ufs_new_fragments", "internal warning"
			     " fragment %u, count %u", fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;
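	/*
	 * oldcount is the fragment offset within the block, i.e. how many
	 * fragments of this chunk are already allocated; newcount is the
	 * size the caller wants the allocation grown to.
	 */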
	/*
	 * Somebody else has just allocated our fragments
	 */
	ufs_error (sb, "ufs_new_fragments", "internal error, "
		   "fragment %u, tmp %u\n", fragment, tmp);

	if (fragment < UFS_I(inode)->i_lastfrag) {
		UFSD("EXIT (ALREADY ALLOCATED)\n");

	UFSD("EXIT (ALREADY ALLOCATED)\n");
	/*
	 * There is not enough space for user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		UFSD("EXIT (FAILED)\n");
	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog (goal);
	/*
	 * allocate new fragment
	 */
	result = ufs_alloc_fragments (inode, cgno, goal, count, err);
	if (result) {
		*p = cpu_to_fs32(sb, result);
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
	}
	UFSD("EXIT, result %u\n", result);
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		UFSD("EXIT, result %u\n", result);
	}
	/* allocate new block and move data */
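	/*
	 * fs_optim selects the allocation policy: UFS_OPTSPACE asks for just
	 * the fragments needed (newcount) to conserve space, while
	 * UFS_OPTTIME asks for a whole block (s_fpb fragments) so the file
	 * can keep growing in place. The policy flips when the amount of
	 * free fragments crosses the s_minfree-derived thresholds below.
	 */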
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
				   result, locked_page);

		*p = cpu_to_fs32(sb, result);
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %u\n", result);
	}

	UFSD("EXIT (FAILED)\n");
static unsigned
ufs_add_fragments (struct inode * inode, unsigned fragment,
		   unsigned oldcount, unsigned newcount, int * err)
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;
	UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);

	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;
	cgno = ufs_dtog(fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	fragno = ufs_dtogd (fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/* Block can be extended */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
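	/*
	 * cg_frsum bookkeeping above: the free run of length 'fragsize' that
	 * followed the old allocation tail disappears, and when the extension
	 * does not consume it completely a shorter run of 'fragsize - count'
	 * fragments is counted in its place.
	 */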
	if(DQUOT_ALLOC_BLOCK(inode, count)) {

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	UFSD("EXIT, fragment %u\n", fragment);
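/*
 * UFS_TEST_FREE_SPACE_CG (below) selects cylinder group 'cgno' when it has
 * either a whole free block (cg_cs.cs_nbfree) or, failing that, a free
 * fragment run of at least 'count' fragments recorded in cg_frsum[].
 */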
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
				     unsigned goal, unsigned count, int * err)
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, result, allocsize;
	UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);

	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;
	/* 1. searching on preferred cylinder group */
	UFS_TEST_FREE_SPACE_CG
	/* 2. quadratic rehash */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}
	/*
	 * 3. brute force search
	 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}
	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);

	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)

		goal = ufs_dtogd (result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
	}
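	/*
	 * The branch above could not find a suitable fragment run, so it
	 * allocated a whole block and returned the unused tail of
	 * s_fpb - count fragments to the free-fragment counts (cg_frsum).
	 */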
	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == (unsigned)-1)

	if(DQUOT_ALLOC_BLOCK(inode, count)) {
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %u\n", result);
static unsigned ufs_alloccg_block (struct inode * inode,
				   struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	unsigned result, cylno, blkno;
	UFSD("ENTER, goal %u\n", goal);

	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
	goal = ucpi->c_rotor;

	goal = ufs_blknum (goal);
	goal = ufs_dtogd (goal);
	/* If the requested block is available, use it. */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == (unsigned)-1)
		return (unsigned)-1;
	ucpi->c_rotor = result;
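	/*
	 * c_rotor remembers where the last block search succeeded, so the
	 * next allocation in this cylinder group resumes from that point.
	 */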
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
	cylno = ufs_cbtocylno(result);
	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);

	UFSD("EXIT, result %u\n", result);
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	if ((offset + size) < uspi->s_fsize)
		rest = size;
	else
		rest = uspi->s_fsize - offset;
	cp = ubh->bh[begin]->b_data + offset;
	while ((table[*cp++] & mask) == 0 && --rest)
		;
	return (size + rest);
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static unsigned ufs_bitmap_search(struct super_block *sb,
				  struct ufs_cg_private_info *ucpi,
				  unsigned goal, unsigned count)
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
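	/*
	 * Example: for count == 2, mask_arr[2] == 0xf and want_arr[2] == 0x6;
	 * a four-bit window of the (shifted) free map must look like 0110,
	 * i.e. a run of exactly two free fragments bounded by allocated ones.
	 */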
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc, result;
	unsigned pos, want, blockmap, mask, end;
	UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
	if (goal)
		start = ufs_dtogd(goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;
	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;
	/* found the byte in the map */
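	/*
	 * 'result' now addresses the first map byte that holds a suitable
	 * free run (c_frotor caches it as the next search hint); the loop
	 * below pins down the exact bit position inside that byte.
	 */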
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %u\n", result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}
	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
static void ufs_clusteracct(struct super_block * sb,
			    struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;
	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	/* Find the size of the cluster going forward. */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	forw = i - start;
	/* Find the size of the cluster going backward. */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	back = start - i;
	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
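/*
 * ufs_fragtable_8fpb[b] describes a free-map byte b (one bit per fragment,
 * 1 == free) for 8-fragments-per-block file systems: bit k-1 is set when the
 * byte contains a maximal run of exactly k free fragments.
 * ufs_fragtable_other packs the corresponding information for 1, 2 and 4
 * fragments per block at the bit offset (s_fpb & 7) that ufs_bitmap_search()
 * uses when it builds the mask passed to ubh_scanc().
 */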
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};