2 * linux/fs/ufs/balloc.c
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
8 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
12 #include <linux/ufs_fs.h>
13 #include <linux/stat.h>
14 #include <linux/time.h>
15 #include <linux/string.h>
16 #include <linux/quotaops.h>
17 #include <linux/buffer_head.h>
18 #include <linux/capability.h>
19 #include <linux/bitops.h>
20 #include <asm/byteorder.h>
25 #define INVBLOCK ((u64)-1L)
27 static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
28 static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
29 static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
30 static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
31 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
32 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
35 * Free 'count' fragments from fragment number 'fragment'
/*
 * ufs_free_fragments - return @count fragments starting at @fragment to
 * the free-fragment bitmap of their cylinder group and update the
 * per-cg, per-sb and total free counters.  If the release completes a
 * whole block, the fragments are re-accounted as one free block.
 * NOTE(review): this excerpt is incomplete (several lines elided, e.g.
 * locking and returns); comments describe only what is visible here.
 */
37 void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
39 struct super_block * sb;
40 struct ufs_sb_private_info * uspi;
41 struct ufs_super_block_first * usb1;
42 struct ufs_cg_private_info * ucpi;
43 struct ufs_cylinder_group * ucg;
44 unsigned cgno, bit, end_bit, bbase, blkmap, i;
48 uspi = UFS_SB(sb)->s_uspi;
49 usb1 = ubh_get_usb_first(uspi);
51 UFSD("ENTER, fragment %llu, count %u\n",
52 (unsigned long long)fragment, count);
/* A run of fragments must not cross a cylinder-group boundary. */
54 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
55 ufs_error (sb, "ufs_free_fragments", "internal error");
/* Translate the absolute fragment into (cylinder group, bit offset). */
59 cgno = ufs_dtog(uspi, fragment);
60 bit = ufs_dtogd(uspi, fragment);
61 if (cgno >= uspi->s_ncg) {
62 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
66 ucpi = ufs_load_cylinder (sb, cgno);
/* Validate the on-disk cylinder-group magic before modifying it. */
69 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
70 if (!ufs_cg_chkmagic(sb, ucg)) {
71 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
75 end_bit = bit + count;
76 bbase = ufs_blknum (bit);
/* Subtract the old per-run-size fragment counts before flipping bits. */
77 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
78 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
/* Mark each fragment free; complain if a bit was already set free. */
79 for (i = bit; i < end_bit; i++) {
80 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
81 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
83 ufs_error (sb, "ufs_free_fragments",
84 "bit already cleared for fragment %u", i);
/* Give the quota back to the owner. */
87 DQUOT_FREE_BLOCK (inode, count);
90 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
91 uspi->cs_total.cs_nffree += count;
92 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* Re-add the per-run-size counts for the updated block map. */
93 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
94 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
97 * Trying to reassemble free fragments into block
/* If the whole block is now free, move the accounting from fragments
 * (nffree) to whole blocks (nbfree). */
99 blkno = ufs_fragstoblks (bbase);
100 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
101 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
102 uspi->cs_total.cs_nffree -= uspi->s_fpb;
103 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
104 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
105 ufs_clusteracct (sb, ucpi, blkno, 1);
106 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
107 uspi->cs_total.cs_nbfree++;
108 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
/* UFS1 only: keep the per-cylinder rotational-position tables in sync. */
109 if (uspi->fs_magic != UFS2_MAGIC) {
110 unsigned cylno = ufs_cbtocylno (bbase);
112 fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
113 ufs_cbtorpos(bbase)), 1);
114 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Push metadata out; write through immediately on a sync mount. */
118 ubh_mark_buffer_dirty (USPI_UBH(uspi));
119 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
120 if (sb->s_flags & MS_SYNCHRONOUS) {
121 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
122 ubh_wait_on_buffer (UCPI_UBH(ucpi));
132 UFSD("EXIT (FAILED)\n");
137 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
/*
 * ufs_free_blocks - free @count fragments starting at @fragment where
 * both are block-aligned, i.e. whole blocks only.  Each block is marked
 * free in its cylinder group and the free-block counters are updated.
 * NOTE(review): excerpt is incomplete (locking, the overflow retry loop
 * continuation and returns are elided); comments cover the visible code.
 */
139 void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
141 struct super_block * sb;
142 struct ufs_sb_private_info * uspi;
143 struct ufs_super_block_first * usb1;
144 struct ufs_cg_private_info * ucpi;
145 struct ufs_cylinder_group * ucg;
146 unsigned overflow, cgno, bit, end_bit, i;
150 uspi = UFS_SB(sb)->s_uspi;
151 usb1 = ubh_get_usb_first(uspi);
153 UFSD("ENTER, fragment %llu, count %u\n",
154 (unsigned long long)fragment, count);
/* Both start and length must be multiples of fragments-per-block. */
156 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
157 ufs_error (sb, "ufs_free_blocks", "internal error, "
158 "fragment %llu, count %u\n",
159 (unsigned long long)fragment, count);
167 cgno = ufs_dtog(uspi, fragment);
168 bit = ufs_dtogd(uspi, fragment);
169 if (cgno >= uspi->s_ncg) {
170 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
/* If the range spills past this cylinder group, remember the overflow
 * (presumably handled by a retry after this cg — elided here). */
173 end_bit = bit + count;
174 if (end_bit > uspi->s_fpg) {
175 overflow = bit + count - uspi->s_fpg;
180 ucpi = ufs_load_cylinder (sb, cgno);
183 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
184 if (!ufs_cg_chkmagic(sb, ucg)) {
185 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
/* Free one whole block per iteration. */
189 for (i = bit; i < end_bit; i += uspi->s_fpb) {
190 blkno = ufs_fragstoblks(i);
191 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
192 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
194 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
195 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
196 ufs_clusteracct (sb, ucpi, blkno, 1);
/* Return one block's worth of quota and bump the free-block counters. */
197 DQUOT_FREE_BLOCK(inode, uspi->s_fpb);
199 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
200 uspi->cs_total.cs_nbfree++;
201 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
/* UFS1 only: per-cylinder rotational-position bookkeeping. */
203 if (uspi->fs_magic != UFS2_MAGIC) {
204 unsigned cylno = ufs_cbtocylno(i);
206 fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
207 ufs_cbtorpos(i)), 1);
208 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
/* Push metadata out; write through immediately on a sync mount. */
212 ubh_mark_buffer_dirty (USPI_UBH(uspi));
213 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
214 if (sb->s_flags & MS_SYNCHRONOUS) {
215 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
216 ubh_wait_on_buffer (UCPI_UBH(ucpi));
233 UFSD("EXIT (FAILED)\n");
238 * Modify inode page cache in such way:
239 * have - blocks with b_blocknr equal to oldb...oldb+count-1
240 * get - blocks with b_blocknr equal to newb...newb+count-1
241 * also we suppose that oldb...oldb+count-1 blocks
242 * situated at the end of file.
244 * We can come here from ufs_writepage or ufs_prepare_write,
245 * locked_page is argument of these functions, so we already lock it.
/*
 * Retargets the buffer_heads for inode blocks [beg, beg+count) from
 * device blocks oldb..oldb+count-1 to newb..newb+count-1, dirtying the
 * affected pages so the data is rewritten at its new location.
 * @locked_page is already locked by the caller and must be skipped by
 * the lock/unlock pair here to avoid a deadlock.
 * NOTE(review): excerpt is incomplete (some loop/cleanup lines elided).
 */
247 static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
248 unsigned int count, unsigned int oldb,
249 unsigned int newb, struct page *locked_page)
/* mask = (blocks per page) - 1, used to advance beg page by page. */
251 const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
252 struct address_space * const mapping = inode->i_mapping;
253 pgoff_t index, cur_index;
254 unsigned end, pos, j;
256 struct buffer_head *head, *bh;
258 UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
259 inode->i_ino, count, oldb, newb);
261 BUG_ON(!locked_page);
262 BUG_ON(!PageLocked(locked_page));
264 cur_index = locked_page->index;
/* Walk the range one page at a time; beg jumps to the next page start. */
266 for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
267 index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
/* Only lock pages other than the one the caller already holds. */
269 if (likely(cur_index != index)) {
270 page = ufs_get_locked_page(mapping, index);
271 if (!page || IS_ERR(page)) /* it was truncated or EIO */
276 head = page_buffers(page);
/* Skip to the buffer_head for block 'beg' within this page. */
279 for (j = 0; j < pos; ++j)
280 bh = bh->b_this_page;
283 if (buffer_mapped(bh)) {
/* pos = offset of this buffer inside the moved run. */
284 pos = bh->b_blocknr - oldb;
286 UFSD(" change from %llu to %llu\n",
287 (unsigned long long)pos + oldb,
288 (unsigned long long)pos + newb);
289 bh->b_blocknr = newb + pos;
/* Drop any stale alias of the new block in the block device's cache. */
290 unmap_underlying_metadata(bh->b_bdev,
292 mark_buffer_dirty(bh);
297 bh = bh->b_this_page;
298 } while (bh != head);
301 set_page_dirty(page);
303 if (likely(cur_index != index))
304 ufs_put_locked_page(page);
/*
 * ufs_clear_frags - zero-fill the @n fragments starting at device
 * block @beg so freshly allocated space never leaks stale disk data.
 * Each buffer is zeroed, marked uptodate and dirtied; it is written
 * synchronously when the inode is sync or @sync is set.
 * NOTE(review): excerpt is incomplete (brelse/error lines elided).
 */
309 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
312 struct buffer_head *bh;
313 sector_t end = beg + n;
315 for (; beg < end; ++beg) {
316 bh = sb_getblk(inode->i_sb, beg);
318 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
319 set_buffer_uptodate(bh);
320 mark_buffer_dirty(bh);
322 if (IS_SYNC(inode) || sync)
323 sync_dirty_buffer(bh);
/*
 * ufs_new_fragments - main allocation entry point: get @count fragments
 * for @inode near @goal, for the on-disk block pointer at @p (updated
 * in place on success).  Three visible strategies:
 *   1. no old fragments at @p  -> fresh ufs_alloc_fragments()
 *   2. last run can be grown   -> in-place ufs_add_fragments()
 *   3. otherwise               -> allocate a bigger run elsewhere and
 *      move the cached data with ufs_change_blocknr(), then free the
 *      old fragments.
 * @err receives the failure reason; the return value lines are elided
 * here — presumably the new fragment number, 0 on failure (confirm).
 * NOTE(review): excerpt is incomplete (locking and returns elided).
 */
328 u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
329 u64 goal, unsigned count, int *err,
330 struct page *locked_page)
332 struct super_block * sb;
333 struct ufs_sb_private_info * uspi;
334 struct ufs_super_block_first * usb1;
335 unsigned cgno, oldcount, newcount;
336 u64 tmp, request, result;
338 UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
339 inode->i_ino, (unsigned long long)fragment,
340 (unsigned long long)goal, count);
343 uspi = UFS_SB(sb)->s_uspi;
344 usb1 = ubh_get_usb_first(uspi);
/* tmp = current on-disk pointer value (0 means not yet allocated). */
348 tmp = ufs_data_ptr_to_cpu(sb, p);
/* Clamp the request so the run stays within one block. */
350 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
351 ufs_warning(sb, "ufs_new_fragments", "internal warning"
352 " fragment %llu, count %u",
353 (unsigned long long)fragment, count);
354 count = uspi->s_fpb - ufs_fragnum(fragment);
356 oldcount = ufs_fragnum (fragment);
357 newcount = oldcount + count;
360 * Somebody else has just allocated our fragments
364 ufs_error(sb, "ufs_new_fragments", "internal error, "
365 "fragment %llu, tmp %llu\n",
366 (unsigned long long)fragment,
367 (unsigned long long)tmp);
371 if (fragment < UFS_I(inode)->i_lastfrag) {
372 UFSD("EXIT (ALREADY ALLOCATED)\n");
379 UFSD("EXIT (ALREADY ALLOCATED)\n");
386 * There is not enough space for user on the device
/* Non-privileged users cannot dip into the reserved minfree space. */
388 if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
390 UFSD("EXIT (FAILED)\n");
/* Pick a cylinder group: from the goal if sane, else from the inode. */
394 if (goal >= uspi->s_size)
397 cgno = ufs_inotocg (inode->i_ino);
399 cgno = ufs_dtog(uspi, goal);
402 * allocate new fragment
/* Case 1: nothing allocated yet at *p — fresh allocation. */
405 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
407 ufs_cpu_to_data_ptr(sb, p, result);
409 UFS_I(inode)->i_lastfrag =
410 max_t(u32, UFS_I(inode)->i_lastfrag,
/* Zero only the newly added fragments, not the pre-existing ones. */
412 ufs_clear_frags(inode, result + oldcount,
413 newcount - oldcount, locked_page != NULL);
416 UFSD("EXIT, result %llu\n", (unsigned long long)result);
/* Case 2: try to extend the existing run in place. */
423 result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
426 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
427 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
428 locked_page != NULL);
430 UFSD("EXIT, result %llu\n", (unsigned long long)result);
435 * allocate new block and move data
/* Case 3: choose request size per the fs optimization preference
 * (space vs. time), then relocate the run. */
437 switch (fs32_to_cpu(sb, usb1->fs_optim)) {
440 if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
441 > uspi->s_dsize * uspi->s_minfree / (2 * 100))
443 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
446 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
449 request = uspi->s_fpb;
450 if (uspi->cs_total.cs_nffree < uspi->s_dsize *
451 (uspi->s_minfree - 2) / 100)
453 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
456 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
458 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
459 locked_page != NULL);
/* Repoint cached buffers at the new location, then update *p. */
460 ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
461 result, locked_page);
462 ufs_cpu_to_data_ptr(sb, p, result);
464 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
/* Release the unused tail of the request and the old fragments. */
466 if (newcount < request)
467 ufs_free_fragments (inode, result + newcount, request - newcount);
468 ufs_free_fragments (inode, tmp, oldcount);
469 UFSD("EXIT, result %llu\n", (unsigned long long)result);
474 UFSD("EXIT (FAILED)\n");
/*
 * ufs_add_fragments - try to grow an existing run of @oldcount
 * fragments at @fragment to @newcount fragments in place, by claiming
 * the immediately following free fragments in the same block.
 * Updates the cg fragment-size summary (cg_frsum), free counters and
 * quota.  Returns @fragment on success; the failure-return lines are
 * elided in this excerpt (presumably 0 — confirm).
 * NOTE(review): excerpt is incomplete (braces/returns elided).
 */
478 static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
479 unsigned oldcount, unsigned newcount, int *err)
481 struct super_block * sb;
482 struct ufs_sb_private_info * uspi;
483 struct ufs_super_block_first * usb1;
484 struct ufs_cg_private_info * ucpi;
485 struct ufs_cylinder_group * ucg;
486 unsigned cgno, fragno, fragoff, count, fragsize, i;
488 UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
489 (unsigned long long)fragment, oldcount, newcount);
492 uspi = UFS_SB(sb)->s_uspi;
493 usb1 = ubh_get_usb_first (uspi);
494 count = newcount - oldcount;
/* Quick rejections: not enough free fragments in the cg, or the grown
 * run would cross a block boundary. */
496 cgno = ufs_dtog(uspi, fragment);
497 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
499 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
501 ucpi = ufs_load_cylinder (sb, cgno);
504 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
505 if (!ufs_cg_chkmagic(sb, ucg)) {
506 ufs_panic (sb, "ufs_add_fragments",
507 "internal error, bad magic number on cg %u", cgno);
511 fragno = ufs_dtogd(uspi, fragment);
512 fragoff = ufs_fragnum (fragno);
/* Every fragment we want to add must currently be free. */
513 for (i = oldcount; i < newcount; i++)
514 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
517 * Block can be extended
519 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Measure the free run following the old fragments to find which
 * cg_frsum size bucket it currently occupies. */
520 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
521 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
523 fragsize = i - oldcount;
524 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
525 ufs_panic (sb, "ufs_add_fragments",
526 "internal error or corrupted bitmap on cg %u", cgno);
/* Move the run from its old size bucket to the shrunken one. */
527 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
528 if (fragsize != count)
529 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
/* Claim the fragments in the bitmap. */
530 for (i = oldcount; i < newcount; i++)
531 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
532 if(DQUOT_ALLOC_BLOCK(inode, count)) {
537 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
538 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
539 uspi->cs_total.cs_nffree -= count;
541 ubh_mark_buffer_dirty (USPI_UBH(uspi));
542 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
/* Write through immediately on a synchronous mount. */
543 if (sb->s_flags & MS_SYNCHRONOUS) {
544 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
545 ubh_wait_on_buffer (UCPI_UBH(ucpi));
549 UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
/*
 * UFS_TEST_FREE_SPACE_CG - expanded inside ufs_alloc_fragments() only.
 * Checks cylinder group 'cgno' for usable space: a whole free block
 * (cs_nbfree), or a free fragment run of size >= 'count' (cg_frsum[k]).
 * Relies on sb, uspi, cgno, count, k and ucg being in the caller's
 * scope.  NOTE(review): excerpt is incomplete — the branch/goto lines
 * taken on success are elided here.
 */
554 #define UFS_TEST_FREE_SPACE_CG \
555 ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
556 if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
558 for (k = count; k < uspi->s_fpb; k++) \
559 if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
/*
 * ufs_alloc_fragments - allocate @count fragments near @goal, searching
 * cylinder groups in the classic FFS order: preferred cg, quadratic
 * rehash, then brute force.  Once a cg with space is found, either a
 * whole block is taken (and its surplus fragments freed back) or a
 * matching fragment run is located via ufs_bitmap_search().
 * Returns the absolute fragment number; INVBLOCK/0 paths are partly
 * elided in this excerpt.
 * NOTE(review): excerpt is incomplete (labels, braces, returns elided).
 */
562 static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
563 u64 goal, unsigned count, int *err)
565 struct super_block * sb;
566 struct ufs_sb_private_info * uspi;
567 struct ufs_super_block_first * usb1;
568 struct ufs_cg_private_info * ucpi;
569 struct ufs_cylinder_group * ucg;
570 unsigned oldcg, i, j, k, allocsize;
573 UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
574 inode->i_ino, cgno, (unsigned long long)goal, count);
577 uspi = UFS_SB(sb)->s_uspi;
578 usb1 = ubh_get_usb_first(uspi);
582 * 1. searching on preferred cylinder group
584 UFS_TEST_FREE_SPACE_CG
587 * 2. quadratic rehash
589 for (j = 1; j < uspi->s_ncg; j *= 2) {
591 if (cgno >= uspi->s_ncg)
593 UFS_TEST_FREE_SPACE_CG
597 * 3. brute force search
598 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
600 cgno = (oldcg + 1) % uspi->s_ncg;
601 for (j = 2; j < uspi->s_ncg; j++) {
603 if (cgno >= uspi->s_ncg)
605 UFS_TEST_FREE_SPACE_CG
608 UFSD("EXIT (FAILED)\n");
/* A cg with space was found — load it and validate the magic. */
612 ucpi = ufs_load_cylinder (sb, cgno);
615 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
616 if (!ufs_cg_chkmagic(sb, ucg))
617 ufs_panic (sb, "ufs_alloc_fragments",
618 "internal error, bad magic number on cg %u", cgno);
619 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
/* Exact-block request: allocate a whole block directly. */
621 if (count == uspi->s_fpb) {
622 result = ufs_alloccg_block (inode, ucpi, goal, err);
623 if (result == INVBLOCK)
/* Find the smallest fragment-run size >= count that exists in this cg. */
628 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
629 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
/* No suitable run: take a whole block and free its unused tail back as
 * one fragment run of size s_fpb - count. */
632 if (allocsize == uspi->s_fpb) {
633 result = ufs_alloccg_block (inode, ucpi, goal, err);
634 if (result == INVBLOCK)
636 goal = ufs_dtogd(uspi, result);
637 for (i = count; i < uspi->s_fpb; i++)
638 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
639 i = uspi->s_fpb - count;
640 DQUOT_FREE_BLOCK(inode, i);
642 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
643 uspi->cs_total.cs_nffree += i;
644 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
645 fs32_add(sb, &ucg->cg_frsum[i], 1);
/* Otherwise claim 'count' fragments out of a run of 'allocsize'. */
649 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
650 if (result == INVBLOCK)
652 if(DQUOT_ALLOC_BLOCK(inode, count)) {
656 for (i = 0; i < count; i++)
657 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
659 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
660 uspi->cs_total.cs_nffree -= count;
661 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
/* Move the leftover piece of the run into its new size bucket. */
662 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
664 if (count != allocsize)
665 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
668 ubh_mark_buffer_dirty (USPI_UBH(uspi));
669 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
670 if (sb->s_flags & MS_SYNCHRONOUS) {
671 ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
672 ubh_wait_on_buffer (UCPI_UBH(ucpi));
/* Convert the cg-relative result into an absolute fragment number. */
676 result += cgno * uspi->s_fpg;
677 UFSD("EXIT3, result %llu\n", (unsigned long long)result);
/*
 * ufs_alloccg_block - allocate one whole block within the cylinder
 * group @ucpi, preferring the block containing @goal (or the cg rotor
 * when no goal is given).  Clears the block in the bitmap, charges
 * quota and updates the free-block counters.  Returns the cg-relative
 * fragment number of the block, INVBLOCK on bitmap-search failure.
 * NOTE(review): excerpt is incomplete (labels, braces, returns elided).
 */
681 static u64 ufs_alloccg_block(struct inode *inode,
682 struct ufs_cg_private_info *ucpi,
685 struct super_block * sb;
686 struct ufs_sb_private_info * uspi;
687 struct ufs_super_block_first * usb1;
688 struct ufs_cylinder_group * ucg;
691 UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
694 uspi = UFS_SB(sb)->s_uspi;
695 usb1 = ubh_get_usb_first(uspi);
696 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* No goal: continue from where the last allocation left off. */
699 goal = ucpi->c_rotor;
/* Round the goal down to a block boundary, cg-relative. */
702 goal = ufs_blknum (goal);
703 goal = ufs_dtogd(uspi, goal);
706 * If the requested block is available, use it.
708 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
/* Goal block busy: scan the bitmap for any fully free block. */
714 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
715 if (result == INVBLOCK)
717 ucpi->c_rotor = result;
719 blkno = ufs_fragstoblks(result);
720 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
721 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
722 ufs_clusteracct (sb, ucpi, blkno, -1);
723 if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
728 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
729 uspi->cs_total.cs_nbfree--;
730 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
/* UFS1 only: per-cylinder rotational-position bookkeeping. */
732 if (uspi->fs_magic != UFS2_MAGIC) {
733 unsigned cylno = ufs_cbtocylno((unsigned)result);
735 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
736 ufs_cbtorpos((unsigned)result)), 1);
737 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
740 UFSD("EXIT, result %llu\n", (unsigned long long)result);
/*
 * ubh_scanc - scan @size bytes of a bitmap spread across the buffers of
 * @ubh, starting at byte offset @begin, looking for a byte whose
 * table[byte] value has a bit of @mask set (i.e. a byte containing a
 * suitable free-fragment pattern).  Returns the number of bytes still
 * unscanned when the match is found — 0 means nothing matched.
 * NOTE(review): excerpt is incomplete — the outer loop over successive
 * buffers and the size/rest bookkeeping between iterations are elided,
 * so the per-iteration flow shown here is only partial.
 */
745 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
746 struct ufs_buffer_head *ubh,
747 unsigned begin, unsigned size,
748 unsigned char *table, unsigned char mask)
750 unsigned rest, offset;
/* Split 'begin' into a buffer index and a byte offset within it. */
754 offset = begin & ~uspi->s_fmask;
755 begin >>= uspi->s_fshift;
757 if ((offset + size) < uspi->s_fsize)
760 rest = uspi->s_fsize - offset;
/* Walk bytes in this buffer until table[byte] & mask fires. */
762 cp = ubh->bh[begin]->b_data + offset;
763 while ((table[*cp++] & mask) == 0 && --rest)
770 return (size + rest);
774 * Find a block of the specified size in the specified cylinder group.
775 * @sp: pointer to super block
776 * @ucpi: pointer to cylinder group info
777 * @goal: near which block we want find new one
778 * @count: specified size
/*
 * Returns the cg-relative fragment number of a free run of @count
 * fragments, or INVBLOCK (the failure-return line is elided here).
 * Strategy: byte-level scan with ubh_scanc() from goal/frotor to the
 * end of the bitmap (then presumably a wrap-around rescan — the retry
 * path is partly elided), followed by a bit-level match inside the
 * found byte using the mask/want pattern tables below.
 * NOTE(review): excerpt is incomplete (braces/returns elided).
 */
780 static u64 ufs_bitmap_search(struct super_block *sb,
781 struct ufs_cg_private_info *ucpi,
782 u64 goal, unsigned count)
785 * Bit patterns for identifying fragments in the block map
786 * used as ((map & mask_arr) == want_arr)
788 static const int mask_arr[9] = {
789 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
791 static const int want_arr[9] = {
792 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
794 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
795 struct ufs_super_block_first *usb1;
796 struct ufs_cylinder_group *ucg;
797 unsigned start, length, loc;
798 unsigned pos, want, blockmap, mask, end;
801 UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
802 (unsigned long long)goal, count);
804 usb1 = ubh_get_usb_first (uspi);
805 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
/* Start scanning at the goal byte, else at the fragment rotor. */
808 start = ufs_dtogd(uspi, goal) >> 3;
810 start = ucpi->c_frotor >> 3;
812 length = ((uspi->s_fpg + 7) >> 3) - start;
813 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
814 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
815 1 << (count - 1 + (uspi->s_fpb & 7)));
/* Nothing past 'start': rescan from the beginning of the bitmap. */
818 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
819 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
821 1 << (count - 1 + (uspi->s_fpb & 7)));
823 ufs_error(sb, "ufs_bitmap_search",
824 "bitmap corrupted on cg %u, start %u,"
825 " length %u, count %u, freeoff %u\n",
826 ucpi->c_cgx, start, length, count,
/* loc counts unscanned bytes; convert back to a fragment number. */
832 result = (start + length - loc) << 3;
833 ucpi->c_frotor = result;
836 * found the byte in the map
/* Slide a count-wide window over each block's fragment map within the
 * matched byte; (map & mask) == want identifies a free run. */
839 for (end = result + 8; result < end; result += uspi->s_fpb) {
840 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
842 mask = mask_arr[count];
843 want = want_arr[count];
844 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
845 if ((blockmap & mask) == want) {
846 UFSD("EXIT, result %llu\n",
847 (unsigned long long)result);
/* ubh_scanc matched a byte but the bit-level check found nothing:
 * the summary information and bitmap disagree. */
855 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
857 UFSD("EXIT (FAILED)\n");
/*
 * ufs_clusteracct - maintain the 44BSD contiguous-cluster summary when
 * block @blkno is freed (@cnt == 1) or allocated (@cnt == -1): update
 * the cluster bitmap, measure the free cluster extending forward and
 * backward from @blkno, and adjust the per-size cluster-sum counters.
 * No-op when the fs was made without cluster summaries
 * (s_contigsumsize <= 0).
 * NOTE(review): excerpt is incomplete (the cnt-sign branches and the
 * forw/back computations between the visible lines are elided).
 */
861 static void ufs_clusteracct(struct super_block * sb,
862 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
864 struct ufs_sb_private_info * uspi;
865 int i, start, end, forw, back;
867 uspi = UFS_SB(sb)->s_uspi;
868 if (uspi->s_contigsumsize <= 0)
/* Mirror the free/allocated state into the cluster bitmap. */
872 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
874 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
877 * Find the size of the cluster going forward.
880 end = start + uspi->s_contigsumsize;
881 if ( end >= ucpi->c_nclusterblks)
882 end = ucpi->c_nclusterblks;
883 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
889 * Find the size of the cluster going backward.
892 end = start - uspi->s_contigsumsize;
895 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
901 * Account for old cluster and the possibly new forward and
/* Clamp the combined cluster size to the summary table's maximum. */
905 if (i > uspi->s_contigsumsize)
906 i = uspi->s_contigsumsize;
907 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
909 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
911 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
/*
 * Per-byte lookup table for bitmaps with 8 fragments per block: indexed
 * by one byte of the free-fragment map (one whole block), each entry is
 * consumed by ubh_scanc() as a bitmask.  NOTE(review): presumably bit
 * k-1 set means the byte contains a free run of exactly k fragments,
 * matching the classic BSD FFS fragtbl8 table — confirm against that
 * reference before relying on the encoding.
 * (Closing brace of this initializer is elided in this excerpt.)
 */
915 static unsigned char ufs_fragtable_8fpb[] = {
916 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
917 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
918 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
919 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
920 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
921 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
922 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
923 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
924 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
925 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
926 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
927 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
928 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
929 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
930 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
931 0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
/*
 * Per-byte lookup table used when s_fpb != 8, i.e. a bitmap byte may
 * span more than one block's fragment map.  Consumed by ubh_scanc()
 * together with the shifted mask built from (count, s_fpb).
 * NOTE(review): presumably equivalent to the BSD FFS fragtbl for
 * sub-byte fragment maps — confirm the bit encoding against that
 * reference.  (Closing brace of this initializer is elided in this
 * excerpt.)
 */
934 static unsigned char ufs_fragtable_other[] = {
935 0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
936 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
937 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
938 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
939 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
940 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
941 0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
942 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
943 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
944 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
945 0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
946 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
947 0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
948 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
949 0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
950 0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,