2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
10 #include <linux/ufs_fs.h>
11 #include <linux/stat.h>
12 #include <linux/time.h>
13 #include <linux/string.h>
14 #include <linux/quotaops.h>
15 #include <linux/buffer_head.h>
16 #include <linux/capability.h>
17 #include <linux/sched.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
/*
 * Forward declarations for the static allocation helpers and lookup
 * tables defined later in this file.
 */
24 static unsigned ufs_add_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
25 static unsigned ufs_alloc_fragments (struct inode *, unsigned, unsigned, unsigned, int *);
26 static unsigned ufs_alloccg_block (struct inode *, struct ufs_cg_private_info *, unsigned, int *);
27 static unsigned ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, unsigned, unsigned);
28 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
29 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
32 * Free 'count' fragments from fragment number 'fragment'
/*
 * ufs_free_fragments - give 'count' fragments starting at fragment
 * number 'fragment' back to the free-fragment bitmap of their cylinder
 * group, updating the per-cg, per-cylinder and filesystem-wide free
 * counters.  If the freed fragments complete a whole block, the block
 * is re-accounted as a free block (nffree shrinks by s_fpb, nbfree
 * grows by one).
 */
34 void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
36 struct super_block * sb;
37 struct ufs_sb_private_info * uspi;
38 struct ufs_super_block_first * usb1;
39 struct ufs_cg_private_info * ucpi;
40 struct ufs_cylinder_group * ucg;
41 unsigned cgno, bit, end_bit, bbase, blkmap, i, blkno, cylno;
44 uspi = UFS_SB(sb)->s_uspi;
45 usb1 = ubh_get_usb_first(uspi);
47 UFSD("ENTER, fragment %u, count %u\n", fragment, count);
/*
 * NOTE(review): sanity check compares the in-block fragment offset plus
 * count against s_fpg; freeing within a single block would suggest
 * s_fpb — confirm against the upstream source.
 */
49 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
50 ufs_error (sb, "ufs_free_fragments", "internal error");
/* locate the cylinder group and the bit offset of the first fragment */
54 cgno = ufs_dtog(fragment);
55 bit = ufs_dtogd(fragment);
56 if (cgno >= uspi->s_ncg) {
57 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
61 ucpi = ufs_load_cylinder (sb, cgno);
64 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
65 if (!ufs_cg_chkmagic(sb, ucg)) {
66 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
/*
 * Retract the per-run-size fragment summary (cg_frsum) for the block
 * containing the freed range before the bitmap changes...
 */
70 end_bit = bit + count;
71 bbase = ufs_blknum (bit);
72 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
73 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
/* mark each fragment free; a bit already set means a double free */
74 for (i = bit; i < end_bit; i++) {
75 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
76 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
78 ufs_error (sb, "ufs_free_fragments",
79 "bit already cleared for fragment %u", i);
/* return the quota the owner was charged for these fragments */
82 DQUOT_FREE_BLOCK (inode, count);
85 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
86 uspi->cs_total.cs_nffree += count;
87 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* ...then re-add the summary for the block's new bitmap state */
88 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
89 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
92 * Trying to reassemble free fragments into block
94 blkno = ufs_fragstoblks (bbase);
95 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
/* whole block is now free: convert fragment accounting to block accounting */
96 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
97 uspi->cs_total.cs_nffree -= uspi->s_fpb;
98 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
99 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
100 ufs_clusteracct (sb, ucpi, blkno, 1);
101 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
102 uspi->cs_total.cs_nbfree++;
103 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
104 cylno = ufs_cbtocylno (bbase);
105 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(bbase)), 1);
106 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* push the modified superblock/cg buffers out; synchronously if mounted sync */
109 ubh_mark_buffer_dirty (USPI_UBH(uspi));
110 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
111 if (sb->s_flags & MS_SYNCHRONOUS) {
112 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
113 ubh_wait_on_buffer (UCPI_UBH(ucpi));
123 UFSD("EXIT (FAILED)\n");
128 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
/*
 * ufs_free_blocks - free 'count' fragments starting at 'fragment',
 * where both values are whole-block aligned (multiples of s_fpb), by
 * setting the corresponding block bits in the cylinder-group bitmap
 * and updating the free-block counters and cylinder summaries.
 * Ranges crossing a cylinder-group boundary are clipped via 'overflow'
 * (the remainder handling is elided from this excerpt).
 */
130 void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
132 struct super_block * sb;
133 struct ufs_sb_private_info * uspi;
134 struct ufs_super_block_first * usb1;
135 struct ufs_cg_private_info * ucpi;
136 struct ufs_cylinder_group * ucg;
137 unsigned overflow, cgno, bit, end_bit, blkno, i, cylno;
140 uspi = UFS_SB(sb)->s_uspi;
141 usb1 = ubh_get_usb_first(uspi);
143 UFSD("ENTER, fragment %u, count %u\n", fragment, count);
/* both the start and the count must be block aligned */
145 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
146 ufs_error (sb, "ufs_free_blocks", "internal error, "
147 "fragment %u, count %u\n", fragment, count);
155 cgno = ufs_dtog (fragment);
156 bit = ufs_dtogd (fragment);
157 if (cgno >= uspi->s_ncg) {
158 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
/* clip the range to this cylinder group; 'overflow' is freed separately */
161 end_bit = bit + count;
162 if (end_bit > uspi->s_fpg) {
163 overflow = bit + count - uspi->s_fpg;
168 ucpi = ufs_load_cylinder (sb, cgno);
171 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
172 if (!ufs_cg_chkmagic(sb, ucg)) {
173 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* free one whole block per iteration */
177 for (i = bit; i < end_bit; i += uspi->s_fpb) {
178 blkno = ufs_fragstoblks(i);
179 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
180 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
182 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
183 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
184 ufs_clusteracct (sb, ucpi, blkno, 1);
185 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
187 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
188 uspi->cs_total.cs_nbfree++;
189 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
190 cylno = ufs_cbtocylno(i);
191 fs16_add(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(i)), 1);
192 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* write back the dirtied metadata; synchronously on a sync mount */
195 ubh_mark_buffer_dirty (USPI_UBH(uspi));
196 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
197 if (sb->s_flags & MS_SYNCHRONOUS) {
198 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
199 ubh_wait_on_buffer (UCPI_UBH(ucpi));
216 UFSD("EXIT (FAILED)\n");
221 * Modify the inode's page cache so that buffers currently mapped to
222 * block numbers oldb...oldb+count-1 are remapped
223 * to newb...newb+count-1.
224 * We assume that the blocks oldb...oldb+count-1 are
225 * situated at the end of the file.
227 * We can come here from ufs_writepage or ufs_prepare_write;
228 * locked_page is an argument of those functions, so it is already locked.
230 static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
231 unsigned int count, unsigned int oldb,
232 unsigned int newb, struct page *locked_page)
234 const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
235 struct address_space * const mapping = inode->i_mapping;
236 pgoff_t index, cur_index;
237 unsigned end, pos, j;
239 struct buffer_head *head, *bh;
241 UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
242 inode->i_ino, count, oldb, newb);
244 BUG_ON(!locked_page);
245 BUG_ON(!PageLocked(locked_page));
247 cur_index = locked_page->index;
/* walk page by page: advance beg to the first block of the next page */
249 for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
250 index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
/* locked_page is already locked by the caller; lock every other page */
252 if (likely(cur_index != index)) {
253 page = ufs_get_locked_page(mapping, index);
254 if (!page || IS_ERR(page)) /* it was truncated or EIO */
259 head = page_buffers(page);
/* skip to the buffer corresponding to block 'beg' within this page */
262 for (j = 0; j < pos; ++j)
263 bh = bh->b_this_page;
266 if (buffer_mapped(bh)) {
267 pos = bh->b_blocknr - oldb;
269 UFSD(" change from %llu to %llu\n",
270 (unsigned long long)pos + oldb,
271 (unsigned long long)pos + newb);
/* retarget the buffer and drop any stale alias at the new location */
272 bh->b_blocknr = newb + pos;
273 unmap_underlying_metadata(bh->b_bdev,
275 mark_buffer_dirty(bh);
280 bh = bh->b_this_page;
281 } while (bh != head);
284 set_page_dirty(page);
286 if (likely(cur_index != index))
287 ufs_put_locked_page(page);
/*
 * ufs_clear_frags - zero-fill 'n' fragments starting at sector 'beg':
 * each fragment's buffer is memset to zero, marked uptodate and dirty,
 * and written out immediately when the inode is sync or the caller's
 * 'sync' flag is set.
 */
292 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
295 struct buffer_head *bh;
296 sector_t end = beg + n;
298 for (; beg < end; ++beg) {
299 bh = sb_getblk(inode->i_sb, beg);
301 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
302 set_buffer_uptodate(bh);
303 mark_buffer_dirty(bh);
305 if (IS_SYNC(inode) || sync)
306 sync_dirty_buffer(bh);
/*
 * ufs_new_fragments - main entry point for allocating 'count' fragments
 * for 'fragment' of the inode, near 'goal'.  *p holds the current
 * on-disk block pointer and is updated to the new location on success.
 * Three cases are handled in turn:
 *   1. nothing allocated yet -> allocate fresh fragments;
 *   2. an existing partial block can be extended in place
 *      (ufs_add_fragments);
 *   3. otherwise allocate a new (possibly full-block) run and migrate
 *      the old data there via ufs_change_blocknr.
 * Returns the first fragment number of the allocation, 0 on failure
 * (error code in *err).
 */
311 unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
312 unsigned goal, unsigned count, int * err, struct page *locked_page)
314 struct super_block * sb;
315 struct ufs_sb_private_info * uspi;
316 struct ufs_super_block_first * usb1;
317 unsigned cgno, oldcount, newcount, tmp, request, result;
319 UFSD("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count);
322 uspi = UFS_SB(sb)->s_uspi;
323 usb1 = ubh_get_usb_first(uspi);
/* clamp the request so it never crosses a block boundary */
328 tmp = fs32_to_cpu(sb, *p);
329 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
330 ufs_warning (sb, "ufs_new_fragments", "internal warning"
331 " fragment %u, count %u", fragment, count);
332 count = uspi->s_fpb - ufs_fragnum(fragment);
334 oldcount = ufs_fragnum (fragment);
335 newcount = oldcount + count;
338 * Somebody else has just allocated our fragments
342 ufs_error (sb, "ufs_new_fragments", "internal error, "
343 "fragment %u, tmp %u\n", fragment, tmp);
/* fragment already covered by a previous allocation */
347 if (fragment < UFS_I(inode)->i_lastfrag) {
348 UFSD("EXIT (ALREADY ALLOCATED)\n");
355 UFSD("EXIT (ALREADY ALLOCATED)\n");
362 * There is not enough space for user on the device
/* only root may dip into the reserved minfree space */
364 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
366 UFSD("EXIT (FAILED)\n");
/* pick the starting cylinder group: from goal, or from the inode's cg */
370 if (goal >= uspi->s_size)
373 cgno = ufs_inotocg (inode->i_ino);
375 cgno = ufs_dtog (goal);
378 * allocate new fragment
381 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
383 *p = cpu_to_fs32(sb, result);
385 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
386 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
387 locked_page != NULL);
390 UFSD("EXIT, result %u\n", result);
/* try to extend the existing partial block in place */
397 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
400 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
401 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
402 locked_page != NULL);
404 UFSD("EXIT, result %u\n", result);
409 * allocate new block and move data
/* choose request size per the fs optimization preference (space vs time) */
411 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
414 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
415 > uspi->s_dsize * uspi->s_minfree / (2 * 100))
417 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
420 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
423 request = uspi->s_fpb;
424 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
425 (uspi->s_minfree - 2) / 100)
427 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
430 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
432 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
433 locked_page != NULL);
/* migrate cached buffers from the old location to the new one */
434 ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
435 result, locked_page);
437 *p = cpu_to_fs32(sb, result);
439 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* return the unused tail of the over-allocation and the old fragments */
441 if (newcount < request)
442 ufs_free_fragments (inode, result + newcount, request - newcount);
443 ufs_free_fragments (inode, tmp, oldcount);
444 UFSD("EXIT, result %u\n", result);
449 UFSD("EXIT (FAILED)\n");
/*
 * ufs_add_fragments - try to grow an existing allocation in place from
 * 'oldcount' to 'newcount' fragments starting at 'fragment'.  Succeeds
 * only if the fragments immediately following the current run are free
 * in the cylinder-group bitmap.  Returns 'fragment' on success, 0 on
 * failure (caller then falls back to a fresh allocation).
 */
454 ufs_add_fragments (struct inode * inode, unsigned fragment,
455 unsigned oldcount, unsigned newcount, int * err)
457 struct super_block * sb;
458 struct ufs_sb_private_info * uspi;
459 struct ufs_super_block_first * usb1;
460 struct ufs_cg_private_info * ucpi;
461 struct ufs_cylinder_group * ucg;
462 unsigned cgno, fragno, fragoff, count, fragsize, i;
464 UFSD("ENTER, fragment %u, oldcount %u, newcount %u\n", fragment, oldcount, newcount);
467 uspi = UFS_SB(sb)->s_uspi;
468 usb1 = ubh_get_usb_first (uspi);
469 count = newcount - oldcount;
/* quick rejects: cg has too few free fragments, or run would cross a block */
471 cgno = ufs_dtog(fragment);
472 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
474 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
476 ucpi = ufs_load_cylinder (sb, cgno);
479 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
480 if (!ufs_cg_chkmagic(sb, ucg)) {
481 ufs_panic (sb, "ufs_add_fragments",
482 "internal error, bad magic number on cg %u", cgno);
/* the fragments to be appended must all be free */
486 fragno = ufs_dtogd (fragment);
487 fragoff = ufs_fragnum (fragno);
488 for (i = oldcount; i < newcount; i++)
489 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
492 * Block can be extended
494 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* measure how far the free run extends beyond newcount within the block */
495 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
496 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
498 fragsize = i - oldcount;
499 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
500 ufs_panic (sb, "ufs_add_fragments",
501 "internal error or corrupted bitmap on cg %u", cgno);
/* move the run from its old size bucket to the shrunken one */
502 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
503 if (fragsize != count)
504 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* claim the fragments and charge the quota */
505 for (i = oldcount; i < newcount; i++)
506 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
507 if(DQUOT_ALLOC_BLOCK(inode, count)) {
512 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
513 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
514 uspi->cs_total.cs_nffree -= count;
516 ubh_mark_buffer_dirty (USPI_UBH(uspi));
517 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
518 if (sb->s_flags & MS_SYNCHRONOUS) {
519 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
520 ubh_wait_on_buffer (UCPI_UBH(ucpi));
524 UFSD("EXIT, fragment %u\n", fragment);
/*
 * UFS_TEST_FREE_SPACE_CG - test whether cylinder group 'cgno' can
 * satisfy the current request: either a whole free block exists
 * (cs_nbfree) or the per-size summary cg_frsum records a free fragment
 * run of at least 'count'.  Expects sb, uspi, cgno, count and k in
 * scope; the success branch is elided from this excerpt.
 */
529 #define UFS_TEST_FREE_SPACE_CG \
530 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
531 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
533 for (k = count; k < uspi->s_fpb; k++) \
534 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * ufs_alloc_fragments - allocate 'count' fragments near 'goal',
 * starting the search at cylinder group 'cgno'.  The cg search follows
 * the classic FFS order: preferred cg, quadratic rehash, then brute
 * force.  Within the chosen cg either a whole block is taken (full-
 * block request, or no suitable fragment run exists — surplus
 * fragments are put back) or a fragment run found by
 * ufs_bitmap_search is claimed.  Returns the filesystem-wide fragment
 * number, 0 on failure.
 */
537 static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
538 unsigned goal, unsigned count, int * err)
540 struct super_block * sb;
541 struct ufs_sb_private_info * uspi;
542 struct ufs_super_block_first * usb1;
543 struct ufs_cg_private_info * ucpi;
544 struct ufs_cylinder_group * ucg;
545 unsigned oldcg, i, j, k, result, allocsize;
547 UFSD("ENTER, ino %lu, cgno %u, goal %u, count %u\n", inode->i_ino, cgno, goal, count);
550 uspi = UFS_SB(sb)->s_uspi;
551 usb1 = ubh_get_usb_first(uspi);
555 * 1. searching on preferred cylinder group
557 UFS_TEST_FREE_SPACE_CG
560 * 2. quadratic rehash
562 for (j = 1; j < uspi->s_ncg; j *= 2) {
564 if (cgno >= uspi->s_ncg)
566 UFS_TEST_FREE_SPACE_CG
570 * 3. brute force search
571 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
573 cgno = (oldcg + 1) % uspi->s_ncg;
574 for (j = 2; j < uspi->s_ncg; j++) {
576 if (cgno >= uspi->s_ncg)
578 UFS_TEST_FREE_SPACE_CG
581 UFSD("EXIT (FAILED)\n");
/* a suitable cylinder group was found; load and validate it */
585 ucpi = ufs_load_cylinder (sb, cgno);
588 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
589 if (!ufs_cg_chkmagic(sb, ucg))
590 ufs_panic (sb, "ufs_alloc_fragments",
591 "internal error, bad magic number on cg %u", cgno);
592 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* full-block request: allocate a whole block directly */
594 if (count == uspi->s_fpb) {
595 result = ufs_alloccg_block (inode, ucpi, goal, err);
596 if (result == (unsigned)-1)
/* smallest free run that fits the request, per cg_frsum */
601 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
602 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* no partial run fits: take a whole block and free the surplus tail */
605 if (allocsize == uspi->s_fpb) {
606 result = ufs_alloccg_block (inode, ucpi, goal, err);
607 if (result == (unsigned)-1)
609 goal = ufs_dtogd (result);
610 for (i = count; i < uspi->s_fpb; i++)
611 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
612 i = uspi->s_fpb - count;
613 DQUOT_FREE_BLOCK(inode, i);
615 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
616 uspi->cs_total.cs_nffree += i;
617 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
618 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* claim a run of 'count' fragments out of an 'allocsize' free run */
622 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
623 if (result == (unsigned)-1)
625 if(DQUOT_ALLOC_BLOCK(inode, count)) {
629 for (i = 0; i < count; i++)
630 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
632 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
633 uspi->cs_total.cs_nffree -= count;
634 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* the run shrinks: move it to the smaller frsum bucket if a tail remains */
635 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
637 if (count != allocsize)
638 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
641 ubh_mark_buffer_dirty (USPI_UBH(uspi));
642 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
643 if (sb->s_flags & MS_SYNCHRONOUS) {
644 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
645 ubh_wait_on_buffer (UCPI_UBH(ucpi));
/* convert the cg-relative result to a filesystem-wide fragment number */
649 result += cgno * uspi->s_fpg;
650 UFSD("EXIT3, result %u\n", result);
/*
 * ufs_alloccg_block - allocate one whole block in cylinder group
 * 'ucpi', as close to 'goal' as possible (falling back to the cg's
 * rotor when no goal is given).  Clears the block in the bitmap,
 * updates cluster, quota, free-block and cylinder accounting, and
 * returns the cg-relative fragment number of the block.
 */
654 static unsigned ufs_alloccg_block (struct inode * inode,
655 struct ufs_cg_private_info * ucpi, unsigned goal, int * err)
657 struct super_block * sb;
658 struct ufs_sb_private_info * uspi;
659 struct ufs_super_block_first * usb1;
660 struct ufs_cylinder_group * ucg;
661 unsigned result, cylno, blkno;
663 UFSD("ENTER, goal %u\n", goal);
666 uspi = UFS_SB(sb)->s_uspi;
667 usb1 = ubh_get_usb_first(uspi);
668 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* no goal: continue from where the last block search left off */
671 goal = ucpi->c_rotor;
/* round the goal down to a block boundary, cg-relative */
674 goal = ufs_blknum (goal);
675 goal = ufs_dtogd (goal);
678 * If the requested block is available, use it.
680 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* otherwise scan the bitmap for any free block */
686 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
687 if (result == (unsigned)-1)
689 ucpi->c_rotor = result;
691 blkno = ufs_fragstoblks(result);
692 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
693 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
694 ufs_clusteracct (sb, ucpi, blkno, -1);
695 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
700 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
701 uspi->cs_total.cs_nbfree--;
702 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
703 cylno = ufs_cbtocylno(result);
704 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno, ufs_cbtorpos(result)), 1);
705 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
707 UFSD("EXIT, result %u\n", result);
/*
 * ubh_scanc - scan 'size' bytes of the buffer head chain starting at
 * byte offset 'begin', looking for the first byte whose lookup-table
 * entry has 'mask' set (i.e. a bitmap byte containing a suitable free
 * run).  Walks fragment-sized chunks of the chain; returns the number
 * of bytes left unscanned (0 means nothing was found).
 */
712 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
713 struct ufs_buffer_head *ubh,
714 unsigned begin, unsigned size,
715 unsigned char *table, unsigned char mask)
717 unsigned rest, offset;
/* split 'begin' into a buffer index and a byte offset within it */
721 offset = begin & ~uspi->s_fmask;
722 begin >>= uspi->s_fshift;
724 if ((offset + size) < uspi->s_fsize)
727 rest = uspi->s_fsize - offset;
729 cp = ubh->bh[begin]->b_data + offset;
730 while ((table[*cp++] & mask) == 0 && --rest)
737 return (size + rest);
741 * Find a free run of the specified size in the specified cylinder group.
742 * @sb: pointer to the super block
743 * @ucpi: pointer to cylinder group info
744 * @goal: block near which we want to find the new one
745 * @count: specified size
/*
 * Scans the cylinder group's free-fragment bitmap byte-wise (via
 * ubh_scanc and the fragment lookup tables), then pinpoints the run
 * within the located byte using the mask_arr/want_arr bit patterns.
 * Returns the cg-relative fragment number of the run, (unsigned)-1 on
 * failure.
 */
747 static unsigned ufs_bitmap_search(struct super_block *sb,
748 struct ufs_cg_private_info *ucpi,
749 unsigned goal, unsigned count)
752 * Bit patterns for identifying fragments in the block map
753 * used as ((map & mask_arr) == want_arr)
755 static const int mask_arr[9] = {
756 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
758 static const int want_arr[9] = {
759 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
761 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
762 struct ufs_super_block_first *usb1;
763 struct ufs_cylinder_group *ucg;
764 unsigned start, length, loc, result;
765 unsigned pos, want, blockmap, mask, end;
767 UFSD("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count);
769 usb1 = ubh_get_usb_first (uspi);
770 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* start the byte scan at the goal, or at the cg's fragment rotor */
773 start = ufs_dtogd(goal) >> 3;
775 start = ucpi->c_frotor >> 3;
/* first pass: from 'start' to the end of the group's bitmap */
777 length = ((uspi->s_fpg + 7) >> 3) - start;
778 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
779 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
780 1 << (count - 1 + (uspi->s_fpb & 7)));
/* second pass (wrap-around): from the beginning of the bitmap */
783 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
784 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
786 1 << (count - 1 + (uspi->s_fpb & 7)));
788 ufs_error(sb, "ufs_bitmap_search",
789 "bitmap corrupted on cg %u, start %u,"
790 " length %u, count %u, freeoff %u\n",
791 ucpi->c_cgx, start, length, count,
/* convert the scan result back to a fragment index and remember it */
797 result = (start + length - loc) << 3;
798 ucpi->c_frotor = result;
801 * found the byte in the map
/* examine each block overlapping the found byte for the exact run */
804 for (end = result + 8; result < end; result += uspi->s_fpb) {
805 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
807 mask = mask_arr[count];
808 want = want_arr[count];
/* slide the pattern across the block's bitmap looking for a match */
809 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
810 if ((blockmap & mask) == want) {
811 UFSD("EXIT, result %u\n", result);
819 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
821 UFSD("EXIT (FAILED)\n");
/*
 * ufs_clusteracct - update the 44BSD cluster summary for block 'blkno'
 * being freed (cnt > 0) or allocated (cnt < 0).  Sets or clears the
 * block's cluster bit, measures the contiguous free cluster extending
 * forward and backward from it, and adjusts the per-cluster-size
 * summary counters accordingly.  No-op when cluster summaries are
 * disabled (s_contigsumsize <= 0).
 */
825 static void ufs_clusteracct(struct super_block * sb,
826 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
828 struct ufs_sb_private_info * uspi;
829 int i, start, end, forw, back;
831 uspi = UFS_SB(sb)->s_uspi;
832 if (uspi->s_contigsumsize <= 0)
/* freeing sets the cluster bit, allocating clears it */
836 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
838 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
841 * Find the size of the cluster going forward.
844 end = start + uspi->s_contigsumsize;
845 if ( end >= ucpi->c_nclusterblks)
846 end = ucpi->c_nclusterblks;
847 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
853 * Find the size of the cluster going backward.
856 end = start - uspi->s_contigsumsize;
859 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
865 * Account for old cluster and the possibly new forward and
/* cluster sizes larger than the summary table are capped */
869 if (i > uspi->s_contigsumsize)
870 i = uspi->s_contigsumsize;
871 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
873 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
875 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Lookup table for filesystems with 8 fragments per block: indexed by a
 * free-fragment bitmap byte, each entry encodes which free-run lengths
 * occur in that byte (used with the mask passed to ubh_scanc, which is
 * built as 1 << (count - 1) for the 8-fpb case — presumably bit k-1
 * set means a free run of length k exists; verify against ubh_scanc's
 * callers).
 */
879 static unsigned char ufs_fragtable_8fpb[] = {
880 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
881 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
882 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
883 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
884 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
885 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
886 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
887 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
888 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
889 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
890 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
891 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
892 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
893 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
894 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
895 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
898 static unsigned char ufs_fragtable_other[] = {
899 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
900 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
901 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
902 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
903 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
904 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
905 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
906 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
907 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
908 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
909 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
910 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
911 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
912 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
913 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
914 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,