/*
 * linux/fs/ufs/balloc.c
 *
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */
#include <linux/ufs_fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"
static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
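/*
 * Overview: UFS hands out space in blocks of s_fpb fragments; a file's
 * tail may occupy a partial block of fragments.  Per cylinder group the
 * code below maintains a free-fragment bitmap (at c_freeoff), counts of
 * free runs by exact length (cg_frsum[]), and free block/fragment
 * totals (cg_cs, fs_cs(), cs_total).
 */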
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);
	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super (sb);

	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}
	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	DQUOT_FREE_BLOCK (inode, count);
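	/*
	 * Update the free-fragment counters, then rebuild this block's
	 * contribution to cg_frsum[]: the ufs_fragacct(..., -1) call above
	 * removed the old free-run histogram entry for the block's map,
	 * and the +1 call below re-adds it for the new state of the bitmap.
	 */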
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
	/*
	 * Try to reassemble the freed fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno (bbase);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super (sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}
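	/*
	 * The range may cross a cylinder-group boundary: only the part
	 * that lies inside this group is freed now; the remainder
	 * ('overflow') is handled by looping back to do_more further down.
	 */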
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno(i);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Modify the inode page cache in the following way:
 * have: blocks with b_blocknr equal to oldb...oldb+count-1
 * get:  blocks with b_blocknr equal to newb...newb+count-1
 * We also assume that the blocks oldb...oldb+count-1 are
 * situated at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already locked.
 */
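/*
 * The loop below walks the range one page at a time (blk_per_page
 * buffers per page), points each mapped buffer_head at its new block
 * number and marks it dirty, so the data gets rewritten at the new
 * location.
 */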
static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
			       unsigned int count, unsigned int oldb,
			       unsigned int newb, struct page *locked_page)
{
	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, cur_index = locked_page->index;
	unsigned int i, j;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
	     inode->i_ino, count, oldb, newb);

	BUG_ON(!PageLocked(locked_page));

	for (i = 0; i < count; i += blk_per_page) {
		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page || IS_ERR(page)) /* it was truncated or EIO */
				continue;
		} else
			page = locked_page;

		j = i;
		head = page_buffers(page);
		bh = head;
		do {
			if (likely(bh->b_blocknr == j + oldb && j < count)) {
				unmap_underlying_metadata(bh->b_bdev,
							  bh->b_blocknr);
				bh->b_blocknr = newb + j++;
				mark_buffer_dirty(bh);
			}

			bh = bh->b_this_page;
		} while (bh != head);

		set_page_dirty(page);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
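/*
 * Zero the buffers for freshly allocated fragments so that stale
 * on-disk contents are never exposed to readers; when 'sync' is set
 * (or the inode is marked for synchronous I/O) the zeroed buffers are
 * written out immediately.
 */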
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}
unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
			   unsigned goal, unsigned count, int * err, struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount, tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);
	tmp = fs32_to_cpu(sb, *p);
	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning (sb, "ufs_new_fragments", "internal warning"
			     " fragment %u, count %u", fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;
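	/*
	 * Three cases follow: a fresh allocation (oldcount == 0) via
	 * ufs_alloc_fragments; an in-place extension of the existing
	 * tail via ufs_add_fragments; otherwise a new area is allocated
	 * and the old fragments are moved there via ufs_change_blocknr.
	 */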
	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error (sb, "ufs_new_fragments", "internal error, "
				   "fragment %u, tmp %u\n", fragment, tmp);
			unlock_super (sb);
			return (unsigned)-1;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}
	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog (goal);
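	/*
	 * ufs_alloc_fragments() searches from cgno onwards and returns a
	 * fragment number relative to the start of the file system
	 * (0 on failure), so the result can be stored directly in *p.
	 */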
	/*
	 * allocate a new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			*p = cpu_to_fs32(sb, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
			ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
					locked_page != NULL);
		}
		unlock_super (sb);
		UFSD("EXIT, result %u\n", result);
		return result;
	}
	/*
	 * resize the block in place
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super (sb);
		UFSD("EXIT, result %u\n", result);
		return result;
	}
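	/*
	 * fs_optim selects the allocation policy: UFS_OPTSPACE asks for
	 * exactly 'newcount' fragments to keep free space unfragmented,
	 * while UFS_OPTTIME asks for a whole block (s_fpb fragments) to
	 * minimize future relocations.  The switch below also flips the
	 * policy when the amount of free fragments crosses the minfree
	 * thresholds.
	 */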
	/*
	 * allocate a new block and move the data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
				   result, locked_page);

		*p = cpu_to_fs32(sb, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super (sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %u\n", result);
		return result;
	}

	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
static unsigned
ufs_add_fragments (struct inode * inode, unsigned fragment,
		   unsigned oldcount, unsigned newcount, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;
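	/*
	 * The fragments can only be grown in place if the new size still
	 * fits within a single block and the bits right after the current
	 * tail are free in this cylinder group; otherwise return 0 and
	 * let the caller fall back to relocation.
	 */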
	cgno = ufs_dtog(fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd (fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * The block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
	if (DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	UFSD("EXIT, fragment %u\n", fragment);

	return fragment;
}
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
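/*
 * The macro above jumps to cg_found when cylinder group 'cgno' has a
 * whole free block, or a free fragment run of at least 'count'
 * fragments (a nonzero cg_frsum[k] for some k >= count).
 */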
static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
				     unsigned goal, unsigned count, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, result, allocsize;

	UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;
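	/*
	 * Example of the search order below: with s_ncg == 8 and a
	 * preferred group of 3, step 1 probes group 3, step 2 probes
	 * 4 (+1), 6 (+2) and 2 (+4, wrapped), and step 3 sweeps the
	 * remaining groups in order until every group has been tried.
	 */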
	/*
	 * 1. search in the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 (0 is checked in step 1, 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;
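	/*
	 * A suitable group was found.  Three sub-cases follow: the request
	 * is exactly one block; no fragment run of a suitable size exists,
	 * so a whole block is split (allocsize == s_fpb); or an exact-fit
	 * run is taken from the fragment map via ufs_bitmap_search().
	 */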
cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goto succed;
	}
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goal = ufs_dtogd (result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}
	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == (unsigned)-1)
		return 0;
	if (DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %u\n", result);
	return result;
}
static unsigned ufs_alloccg_block (struct inode * inode,
				   struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	unsigned result, cylno, blkno;

	UFSD("ENTER, goal %u\n", goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd (goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}
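	/*
	 * No usable goal: search the free map for a whole block starting
	 * at the per-group rotor position, and remember where we found it
	 * so that the next search starts from there.
	 */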
norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == (unsigned)-1)
		return (unsigned)-1;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	if (DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
		*err = -EDQUOT;
		return (unsigned)-1;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
	cylno = ufs_cbtocylno(result);
	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);

	UFSD("EXIT, result %u\n", result);

	return result;
}
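/*
 * Scan 'size' bytes of the free-fragment bitmap starting at byte
 * offset 'begin', stopping at the first byte whose 'table' entry has
 * a bit in common with 'mask'.  Returns the number of bytes from the
 * matching byte (inclusive) to the end of the scanned range, or 0 if
 * nothing was found.
 */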
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static unsigned ufs_bitmap_search(struct super_block *sb,
				  struct ufs_cg_private_info *ucpi,
				  unsigned goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc, result;
	unsigned pos, want, blockmap, mask, end;
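	/*
	 * Worked example for count == 2: mask_arr[2] is 0xf and
	 * want_arr[2] is 0x6 (binary 0110), i.e. two free fragments
	 * bounded by allocated ones.  The block map is shifted left one
	 * bit before matching, so a run starting at the block boundary
	 * sees an "allocated" bit below it and still matches exactly.
	 */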
	UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal)
		start = ufs_dtogd(goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
			(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
			1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return (unsigned)-1;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;
	/*
	 * found the byte in the map
	 */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %u\n", result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return (unsigned)-1;
}
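/*
 * Maintain the 44BSD cluster summary: one bit per block in the cluster
 * map plus a per-length count array at c_clustersumoff.  Setting or
 * clearing one block's bit may merge or split adjacent clusters, so
 * the old forward and backward cluster lengths are un-accounted and
 * the combined length is accounted instead (capped at s_contigsumsize).
 */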
static void ufs_clusteracct(struct super_block * sb,
			    struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;
	/*
	 * Account for the old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
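/*
 * The tables below map one byte of the free-fragment bitmap to a
 * bitmask describing the lengths of the free runs in that byte.
 * In ufs_fragtable_8fpb (8 fragments per block) bit k-1 is set when
 * the byte contains a run of exactly k free fragments; for example
 * entry 0x1B (binary 00011011, two runs of two) is 0x02.
 * ufs_fragtable_other packs the same information for 1, 2 and 4
 * fragments per block into disjoint bit fields, selected by the
 * (s_fpb & 7) shift in the mask passed to ubh_scanc().
 */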
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};