/*
 * linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */
#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"
static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
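/*
 * Editorial note: in the cylinder-group free-fragment bitmap at
 * ucpi->c_freeoff, a set bit marks a free fragment and a clear bit an
 * allocated one; hence freeing uses ubh_setbit()/ubh_setblock() and
 * allocation uses ubh_clrbit()/ubh_clrblock() below.
 *
 * Illustrative arithmetic, assuming 8 fragments per block (s_fpb == 8):
 * for fragment number 21, ufs_fragstoblks() gives block 2, ufs_fragnum()
 * gives offset 5 within the block, and ufs_blknum() gives 16, the first
 * fragment of the enclosing block.
 */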
/*
 * Free 'count' fragments starting at fragment number 'fragment'.
 */
void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpb)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

	cgno = ufs_dtog(fragment);
	bit = ufs_dtogd(fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks outside the device");
		goto failed;
	}
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already set for fragment %u", i);
	}
	DQUOT_FREE_BLOCK (inode, count);

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
	/*
	 * Try to reassemble the freed fragments into a whole block.
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno (bbase);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}
/*
 * Free 'count' fragments starting at fragment number 'fragment'
 * (whole blocks only).
 */
void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %u, count %u\n", fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %u, count %u\n", fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog (fragment);
	bit = ufs_dtogd (fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks outside the device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}
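	/*
	 * Editorial note: a range crossing the cylinder-group boundary was
	 * split above; the part past uspi->s_fpg is kept in 'overflow' and
	 * freed by looping back to do_more once this group is done.
	 */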
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		cylno = ufs_cbtocylno(i);
		fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
static struct page *ufs_get_locked_page(struct address_space *mapping,
					pgoff_t index)
{
	struct page *page;

try_again:
	page = find_lock_page(mapping, index);

	if (!page) {
		page = read_cache_page(mapping, index,
				       (filler_t*)mapping->a_ops->readpage,
				       NULL);
		if (IS_ERR(page)) {
			printk(KERN_ERR "ufs_change_blocknr: "
			       "read_cache_page error: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);
			goto out;
		}

		lock_page(page);

		if (!PageUptodate(page) || PageError(page)) {
			unlock_page(page);
			page_cache_release(page);

			printk(KERN_ERR "ufs_change_blocknr: "
			       "cannot read page: ino %lu, index: %lu\n",
			       mapping->host->i_ino, index);

			page = ERR_PTR(-EIO);
			goto out;
		}
	}

	if (unlikely(!page->mapping || !page_has_buffers(page))) {
		unlock_page(page);
		page_cache_release(page);
		goto try_again; /* we really need these buffers */
	}
out:
	return page;
}
/*
 * Modify the inode page cache so that blocks whose b_blocknr is
 * oldb...oldb+count-1 get b_blocknr newb...newb+count-1 instead.
 * We also assume that the blocks oldb...oldb+count-1 are situated
 * at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page
 * is an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
			       unsigned int count, unsigned int oldb,
			       unsigned int newb, struct page *locked_page)
{
	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, cur_index = locked_page->index;
	unsigned int i, j;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
	     inode->i_ino, count, oldb, newb);

	BUG_ON(!PageLocked(locked_page));

	for (i = 0; i < count; i += blk_per_page) {
		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (IS_ERR(page))
				continue;
		} else
			page = locked_page;

		j = i;
		head = page_buffers(page);
		bh = head;
		do {
			if (likely(bh->b_blocknr == j + oldb && j < count)) {
				unmap_underlying_metadata(bh->b_bdev,
							  bh->b_blocknr);
				bh->b_blocknr = newb + j++;
				mark_buffer_dirty(bh);
			}

			bh = bh->b_this_page;
		} while (bh != head);

		set_page_dirty(page);

		if (likely(cur_index != index)) {
			unlock_page(page);
			page_cache_release(page);
		}
	}
	UFSD("EXIT\n");
}
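/*
 * Editorial note: ufs_new_fragments() below is the allocation entry point.
 * It handles three cases in turn: the request is already satisfied (someone
 * raced with us), the existing partial block can be extended in place
 * (ufs_add_fragments), or a new area must be allocated and the old
 * fragments moved over (ufs_alloc_fragments plus ufs_change_blocknr).
 */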
unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
			   unsigned goal, unsigned count, int * err, struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount, tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);

	tmp = fs32_to_cpu(sb, *p);
	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning (sb, "ufs_new_fragments", "internal warning,"
			     " fragment %u, count %u", fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error (sb, "ufs_new_fragments", "internal error, "
				   "fragment %u, tmp %u\n", fragment, tmp);
			unlock_super (sb);
			return (unsigned)-1;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	}
	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog (goal);

	/*
	 * allocate a new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			*p = cpu_to_fs32(sb, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		}
		unlock_super(sb);
		UFSD("EXIT, result %u\n", result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		unlock_super(sb);
		UFSD("EXIT, result %u\n", result);
		return result;
	}
	/*
	 * allocate a new block and move the data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
				   result, locked_page);

		*p = cpu_to_fs32(sb, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %u\n", result);
		return result;
	}

	unlock_super(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}
static unsigned
ufs_add_fragments (struct inode * inode, unsigned fragment,
		   unsigned oldcount, unsigned newcount, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;

	cgno = ufs_dtog(fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd (fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * The block can be extended in place
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
	if (DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	UFSD("EXIT, fragment %u\n", fragment);

	return fragment;
}
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
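/*
 * Editorial note: the macro above declares a cylinder group usable when it
 * has either a whole free block (cs_nbfree) or a free run of at least
 * 'count' fragments (a nonzero cg_frsum[k] for some k >= count); it jumps
 * to the cg_found label in ufs_alloc_fragments() in either case.
 */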
static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
				     unsigned goal, unsigned count, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, result, allocsize;

	UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. search the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 (0 is checked in step 1, 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;
cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == (unsigned)-1)
			return 0;
		goal = ufs_dtogd (result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		DQUOT_FREE_BLOCK(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}
	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == (unsigned)-1)
		return 0;
	if (DQUOT_ALLOC_BLOCK(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %u\n", result);
	return result;
}
static unsigned ufs_alloccg_block (struct inode * inode,
				   struct ufs_cg_private_info * ucpi,
				   unsigned goal, int * err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	unsigned result, cylno, blkno;

	UFSD("ENTER, goal %u\n", goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd (goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}
norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == (unsigned)-1)
		return (unsigned)-1;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	if (DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
		*err = -EDQUOT;
		return (unsigned)-1;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
	cylno = ufs_cbtocylno(result);
	fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
	fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);

	UFSD("EXIT, result %u\n", result);

	return result;
}
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		offset = 0;
		begin++;
	}
	return (size + rest);
}
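/*
 * Editorial note: ubh_scanc() above scans 'size' bitmap bytes spread over
 * the fragments of 'ubh', stopping at the first byte whose table entry has
 * a bit of 'mask' set. It returns the number of bytes not yet consumed,
 * counting the matching byte, so 0 means no match; ufs_bitmap_search()
 * converts that remainder back to a position via (start + length - loc).
 */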
/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to cylinder group info
 * @goal: the block near which to search for a new one
 * @count: the size requested, in fragments
 */
static unsigned ufs_bitmap_search(struct super_block *sb,
				  struct ufs_cg_private_info *ucpi,
				  unsigned goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map,
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc, result;
	unsigned pos, want, blockmap, mask, end;
772 UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);
774 usb1 = ubh_get_usb_first (uspi);
775 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
778 start = ufs_dtogd(goal) >> 3;
780 start = ucpi->c_frotor >> 3;
782 length = ((uspi->s_fpg + 7) >> 3) - start;
783 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
784 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
785 1 << (count - 1 + (uspi->s_fpb & 7)));
788 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
789 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
791 1 << (count - 1 + (uspi->s_fpb & 7)));
793 ufs_error(sb, "ufs_bitmap_search",
794 "bitmap corrupted on cg %u, start %u,"
795 " length %u, count %u, freeoff %u\n",
796 ucpi->c_cgx, start, length, count,
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %u\n", result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return (unsigned)-1;
}
static void ufs_clusteracct(struct super_block * sb,
			    struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;
	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for the old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
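/*
 * Editorial example for ufs_clusteracct(), assuming s_contigsumsize is
 * large enough: freeing block 10 (cnt == 1) when blocks 8-9 and 11 are
 * already free yields back == 2 and forw == 1. The old clusters of sizes
 * 2 and 1 are retired from the cluster summary, and one merged cluster of
 * size back + forw + 1 == 4 is recorded in their place.
 */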
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
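/*
 * Editorial note: in ufs_fragtable_8fpb above, entry [b] has bit k-1 set
 * when byte b of the free-fragment map (one bit per fragment, eight
 * fragments per block) contains a free run of exactly k fragments;
 * ubh_scanc() masks it with 1 << (count - 1) to spot a byte holding a run
 * of the wanted size. ufs_fragtable_other below appears to serve the same
 * purpose for the 1-, 2- and 4-fragments-per-block layouts folded into one
 * shared table, which is why the mask gains the (uspi->s_fpb & 7) offset.
 */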
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};