/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

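/*
 * A note on terminology (the classic FFS scheme this code follows):
 * a filesystem block consists of uspi->s_fpb fragments, and each
 * cylinder group keeps a bitmap at ucpi->c_freeoff with one bit per
 * fragment, set while the fragment is free.  INVBLOCK is the
 * "nothing allocated" marker returned by the search helpers below.
 */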
static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, bit, end_bit, bbase, blkmap, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if (ufs_fragnum(fragment) + count > uspi->s_fpg)
                ufs_error (sb, "ufs_free_fragments", "internal error");

        lock_super(sb);

        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
                goto failed;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
                goto failed;
        }

        end_bit = bit + count;
        bbase = ufs_blknum (bit);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
        for (i = bit; i < end_bit; i++) {
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
                else
                        ufs_error (sb, "ufs_free_fragments",
                                   "bit already cleared for fragment %u", i);
        }

        DQUOT_FREE_BLOCK (inode, count);

        fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree += count;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

        /*
         * If all fragments of this block are now free, stop counting them
         * as free fragments and account for one more whole free block.
         */
        blkno = ufs_fragstoblks (bbase);
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
                uspi->cs_total.cs_nffree -= uspi->s_fpb;
                fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno (bbase);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(bbase)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }
        sb->s_dirt = 1;

        unlock_super (sb);
        UFSD("EXIT\n");
        return;

failed:
        unlock_super (sb);
        UFSD("EXIT (FAILED)\n");
        return;
}

/*
 * Free 'count' fragments starting at fragment number 'fragment';
 * both must be block-aligned, i.e. only whole blocks are freed here.
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned overflow, cgno, bit, end_bit, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
                ufs_error (sb, "ufs_free_blocks", "internal error, "
                           "fragment %llu, count %u\n",
                           (unsigned long long)fragment, count);
                goto failed;
        }

        lock_super(sb);

do_more:
        overflow = 0;
        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
                goto failed_unlock;
        }
        end_bit = bit + count;
        if (end_bit > uspi->s_fpg) {
                overflow = bit + count - uspi->s_fpg;
                count -= overflow;
                end_bit -= overflow;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed_unlock;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
                goto failed_unlock;
        }

        for (i = bit; i < end_bit; i += uspi->s_fpb) {
                blkno = ufs_fragstoblks(i);
                if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                        ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
                }
                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
                DQUOT_FREE_BLOCK(inode, uspi->s_fpb);

                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno(i);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(i)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }

        if (overflow) {
                fragment += count;
                count = overflow;
                goto do_more;
        }

        sb->s_dirt = 1;
        unlock_super (sb);
        UFSD("EXIT\n");
        return;

failed_unlock:
        unlock_super (sb);
failed:
        UFSD("EXIT (FAILED)\n");
        return;
}

/*
 * Modify the inode's page cache so that buffers currently mapped to
 * blocks oldb...oldb+count-1 end up mapped to newb...newb+count-1.
 * The old blocks are assumed to sit at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page
 * is the page those functions were given, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                               unsigned int count, sector_t oldb,
                               sector_t newb, struct page *locked_page)
{
        const unsigned blks_per_page =
                1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        const unsigned mask = blks_per_page - 1;
        struct address_space * const mapping = inode->i_mapping;
        pgoff_t index, cur_index, last_index;
        unsigned pos, j, lblock;
        sector_t end, i;
        struct page *page;
        struct buffer_head *head, *bh;

        UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
             inode->i_ino, count,
             (unsigned long long)oldb, (unsigned long long)newb);

        BUG_ON(!locked_page);
        BUG_ON(!PageLocked(locked_page));

        cur_index = locked_page->index;
        end = count + beg;
        last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
        for (i = beg; i < end; i = (i | mask) + 1) {
                index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

                if (likely(cur_index != index)) {
                        page = ufs_get_locked_page(mapping, index);
                        if (!page)/* it was truncated */
                                continue;
                        if (IS_ERR(page)) {/* or EIO */
                                ufs_error(inode->i_sb, __FUNCTION__,
                                          "read of page %llu failed\n",
                                          (unsigned long long)index);
                                continue;
                        }
                } else
                        page = locked_page;

                head = page_buffers(page);
                bh = head;
                pos = i & mask;
                for (j = 0; j < pos; ++j)
                        bh = bh->b_this_page;

                if (unlikely(index == last_index))
                        lblock = end & mask;
                else
                        lblock = blks_per_page;

                do {
                        if (j >= lblock)
                                break;
                        pos = (i - beg) + j;

                        if (!buffer_mapped(bh))
                                map_bh(bh, inode->i_sb, oldb + pos);
                        if (!buffer_uptodate(bh)) {
                                ll_rw_block(READ, 1, &bh);
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh)) {
                                        ufs_error(inode->i_sb, __FUNCTION__,
                                                  "read of block failed\n");
                                        break;
                                }
                        }

                        UFSD(" change from %llu to %llu, pos %u\n",
                             (unsigned long long)pos + oldb,
                             (unsigned long long)pos + newb, pos);

                        bh->b_blocknr = newb + pos;
                        unmap_underlying_metadata(bh->b_bdev,
                                                  bh->b_blocknr);
                        mark_buffer_dirty(bh);
                        ++j;
                        bh = bh->b_this_page;
                } while (bh != head);

                if (likely(cur_index != index))
                        ufs_put_locked_page(page);
        }
        UFSD("EXIT\n");
}

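/*
 * Zero the freshly allocated fragments beg...beg+n-1 through the buffer
 * cache so that stale on-disk data is never exposed to the user, writing
 * them out synchronously when the inode or the caller requires it.
 */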
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
                            int sync)
{
        struct buffer_head *bh;
        sector_t end = beg + n;

        for (; beg < end; ++beg) {
                bh = sb_getblk(inode->i_sb, beg);
                lock_buffer(bh);
                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
                if (IS_SYNC(inode) || sync)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
}

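/*
 * ufs_new_fragments - allocate 'count' fragments beginning at 'fragment'.
 * A sketch of the contract, as inferred from the callers: @p points to
 * the on-disk block pointer (in the inode or an indirect block) that
 * receives the result, @goal is the preferred location, and @locked_page,
 * when non-NULL, is the already locked page being written.  Returns the
 * first fragment of the resulting allocation, 0 if nothing could be
 * allocated, or INVBLOCK on internal error.
 */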
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                           u64 goal, unsigned count, int *err,
                           struct page *locked_page)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        unsigned cgno, oldcount, newcount;
        u64 tmp, request, result;

        UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
             inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        *err = -ENOSPC;

        lock_super (sb);
        tmp = ufs_data_ptr_to_cpu(sb, p);

        if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
                ufs_warning(sb, "ufs_new_fragments", "internal warning"
                            " fragment %llu, count %u",
                            (unsigned long long)fragment, count);
                count = uspi->s_fpb - ufs_fragnum(fragment);
        }
        oldcount = ufs_fragnum (fragment);
        newcount = oldcount + count;

        /*
         * Somebody else has just allocated our fragments
         */
        if (oldcount) {
                if (!tmp) {
                        ufs_error(sb, "ufs_new_fragments", "internal error, "
                                  "fragment %llu, tmp %llu\n",
                                  (unsigned long long)fragment,
                                  (unsigned long long)tmp);
                        unlock_super(sb);
                        return INVBLOCK;
                }
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        unlock_super (sb);
                        return 0;
                }
        } else {
                if (tmp) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        unlock_super(sb);
                        return 0;
                }
        }

        /*
         * There is not enough space for the user on the device
         */
        if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
                unlock_super (sb);
                UFSD("EXIT (FAILED)\n");
                return 0;
        }

        if (goal >= uspi->s_size)
                goal = 0;
        if (goal == 0)
                cgno = ufs_inotocg (inode->i_ino);
        else
                cgno = ufs_dtog(uspi, goal);

        /*
         * allocate new fragment
         */
        if (oldcount == 0) {
                result = ufs_alloc_fragments (inode, cgno, goal, count, err);
                if (result) {
                        ufs_cpu_to_data_ptr(sb, p, result);
                        *err = 0;
                        UFS_I(inode)->i_lastfrag =
                                max_t(u32, UFS_I(inode)->i_lastfrag,
                                      fragment + count);
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
                }
                unlock_super(sb);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * resize block
         */
        result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
        if (result) {
                *err = 0;
                UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                unlock_super(sb);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * allocate new block and move data
         */
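        /*
         * The request size follows the allocator policy: UFS_OPTSPACE asks
         * for exactly newcount fragments, UFS_OPTTIME asks for a whole block
         * so the file can keep growing without copying again; the policy is
         * switched when the amount of free fragment space crosses the
         * minfree-based thresholds below.
         */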
        switch (fs32_to_cpu(sb, usb1->fs_optim)) {
            case UFS_OPTSPACE:
                request = newcount;
                if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
                    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
                        break;
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                break;
            default:
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

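                /* no break: fall through to UFS_OPTTIME */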
            case UFS_OPTTIME:
                request = uspi->s_fpb;
                if (uspi->cs_total.cs_nffree < uspi->s_dsize *
                    (uspi->s_minfree - 2) / 100)
                        break;
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                break;
        }
        result = ufs_alloc_fragments (inode, cgno, goal, request, err);
        if (result) {
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                ufs_change_blocknr(inode, fragment - oldcount, oldcount,
                                   uspi->s_sbbase + tmp,
                                   uspi->s_sbbase + result, locked_page);
                ufs_cpu_to_data_ptr(sb, p, result);
                *err = 0;
                UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
                unlock_super(sb);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
                ufs_free_fragments (inode, tmp, oldcount);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        unlock_super(sb);
        UFSD("EXIT (FAILED)\n");
        return 0;
}

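/*
 * Try to extend the partially allocated block at 'fragment' in place,
 * from oldcount to newcount fragments.  Returns 'fragment' on success,
 * or 0 when the adjacent fragments are not free (or quota is exceeded).
 */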
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                             unsigned oldcount, unsigned newcount, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, fragno, fragoff, count, fragsize, i;

        UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
             (unsigned long long)fragment, oldcount, newcount);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first (uspi);
        count = newcount - oldcount;

        cgno = ufs_dtog(uspi, fragment);
        if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
                return 0;
        if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
                return 0;
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error, bad magic number on cg %u", cgno);
                return 0;
        }

        fragno = ufs_dtogd(uspi, fragment);
        fragoff = ufs_fragnum (fragno);
        for (i = oldcount; i < newcount; i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        return 0;
        /*
         * Block can be extended
         */
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());
        for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        break;
        fragsize = i - oldcount;
        if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
                ufs_panic (sb, "ufs_add_fragments",
                        "internal error or corrupted bitmap on cg %u", cgno);
        fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
        if (fragsize != count)
                fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
        for (i = oldcount; i < newcount; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
        if(DQUOT_ALLOC_BLOCK(inode, count)) {
                *err = -EDQUOT;
                return 0;
        }

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }
        sb->s_dirt = 1;

        UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

        return fragment;
}

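/*
 * Check whether cylinder group 'cgno' can satisfy the request: either a
 * whole free block, or (according to cg_frsum) a run of at least 'count'
 * free fragments.  Jumps to cg_found on success.
 */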
#define UFS_TEST_FREE_SPACE_CG \
        ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
        if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
                goto cg_found; \
        for (k = count; k < uspi->s_fpb; k++) \
                if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
                        goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
                               u64 goal, unsigned count, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned oldcg, i, j, k, allocsize;
        u64 result;

        UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
             inode->i_ino, cgno, (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        oldcg = cgno;

        /*
         * 1. search the preferred cylinder group
         */
        UFS_TEST_FREE_SPACE_CG

        /*
         * 2. quadratic rehash
         */
        for (j = 1; j < uspi->s_ncg; j *= 2) {
                cgno += j;
                if (cgno >= uspi->s_ncg)
                        cgno -= uspi->s_ncg;
                UFS_TEST_FREE_SPACE_CG
        }

        /*
         * 3. brute force search
         * We start at j = 2: cg 'oldcg' was checked in step 1, and
         * 'oldcg + 1' was the first group tried in step 2.
         */
        cgno = (oldcg + 1) % uspi->s_ncg;
        for (j = 2; j < uspi->s_ncg; j++) {
                cgno++;
                if (cgno >= uspi->s_ncg)
                        cgno = 0;
                UFS_TEST_FREE_SPACE_CG
        }

        UFSD("EXIT (FAILED)\n");
        return 0;

cg_found:
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg))
                ufs_panic (sb, "ufs_alloc_fragments",
                        "internal error, bad magic number on cg %u", cgno);
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());

        if (count == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goto succed;
        }

        for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
                if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
                        break;

        if (allocsize == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goal = ufs_dtogd(uspi, result);
                for (i = count; i < uspi->s_fpb; i++)
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
                i = uspi->s_fpb - count;
                DQUOT_FREE_BLOCK(inode, i);

                fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
                uspi->cs_total.cs_nffree += i;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
                fs32_add(sb, &ucg->cg_frsum[i], 1);
                goto succed;
        }

        result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
        if (result == INVBLOCK)
                return 0;
        if(DQUOT_ALLOC_BLOCK(inode, count)) {
                *err = -EDQUOT;
                return 0;
        }
        for (i = 0; i < count; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

        if (count != allocsize)
                fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS) {
                ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
                ubh_wait_on_buffer (UCPI_UBH(ucpi));
        }
        sb->s_dirt = 1;

        result += cgno * uspi->s_fpg;
        UFSD("EXIT3, result %llu\n", (unsigned long long)result);
        return result;
}

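/*
 * Allocate one whole block in this cylinder group, preferably at 'goal';
 * returns the first fragment of the block, or INVBLOCK when no block is
 * available (or the quota check fails).
 */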
static u64 ufs_alloccg_block(struct inode *inode,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        struct ufs_cylinder_group * ucg;
        u64 result, blkno;

        UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

        if (goal == 0) {
                goal = ucpi->c_rotor;
                goto norot;
        }
        goal = ufs_blknum (goal);
        goal = ufs_dtogd(uspi, goal);

        /*
         * If the requested block is available, use it.
         */
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
                result = goal;
                goto gotit;
        }

norot:
        result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
        if (result == INVBLOCK)
                return INVBLOCK;
        ucpi->c_rotor = result;
gotit:
        blkno = ufs_fragstoblks(result);
        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                ufs_clusteracct (sb, ucpi, blkno, -1);
        if(DQUOT_ALLOC_BLOCK(inode, uspi->s_fpb)) {
                *err = -EDQUOT;
                return INVBLOCK;
        }

        fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
        uspi->cs_total.cs_nbfree--;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

        if (uspi->fs_magic != UFS2_MAGIC) {
                unsigned cylno = ufs_cbtocylno((unsigned)result);

                fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
                                          ufs_cbtorpos((unsigned)result)), 1);
                fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
        }

        UFSD("EXIT, result %llu\n", (unsigned long long)result);

        return result;
}

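/*
 * Scan 'size' bytes of the bitmap, starting at byte offset 'begin' in the
 * buffer chain, for a byte whose table[] entry has 'mask' set (much like
 * the classic BSD scanc()).  Returns the number of bytes left unscanned,
 * counting the matching byte itself; 0 means no match was found.
 */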
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
                          struct ufs_buffer_head *ubh,
                          unsigned begin, unsigned size,
                          unsigned char *table, unsigned char mask)
{
        unsigned rest, offset;
        unsigned char *cp;

        offset = begin & ~uspi->s_fmask;
        begin >>= uspi->s_fshift;
        for (;;) {
                if ((offset + size) < uspi->s_fsize)
                        rest = size;
                else
                        rest = uspi->s_fsize - offset;
                size -= rest;
                cp = ubh->bh[begin]->b_data + offset;
                while ((table[*cp++] & mask) == 0 && --rest)
                        ;
                if (rest || !size)
                        break;
                begin++;
                offset = 0;
        }
        return (size + rest);
}

/*
 * Find a run of 'count' free fragments in the specified cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to the cylinder group info
 * @goal: fragment near which the new one should be found
 * @count: requested size, in fragments
 */
static u64 ufs_bitmap_search(struct super_block *sb,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, unsigned count)
{
        /*
         * Bit patterns for identifying fragments in the block map
         * used as ((map & mask_arr) == want_arr)
         */
        static const int mask_arr[9] = {
                0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
        };
        static const int want_arr[9] = {
                0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
        };
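        /*
         * Indexed by 'count'; e.g. for count == 2, mask 0xf and want 0x6
         * match a window in which exactly two adjacent fragments are free
         * and the positions on either side are not, so the search below
         * picks out a free run of exactly 'count' fragments (the map is
         * shifted left by one so such a run may start at fragment 0).
         */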
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        struct ufs_super_block_first *usb1;
        struct ufs_cylinder_group *ucg;
        unsigned start, length, loc;
        unsigned pos, want, blockmap, mask, end;
        u64 result;

        UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
             (unsigned long long)goal, count);

        usb1 = ubh_get_usb_first (uspi);
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

        if (goal)
                start = ufs_dtogd(uspi, goal) >> 3;
        else
                start = ucpi->c_frotor >> 3;

        length = ((uspi->s_fpg + 7) >> 3) - start;
        loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
                1 << (count - 1 + (uspi->s_fpb & 7)));
        if (loc == 0) {
                length = start + 1;
                loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
                                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
                                ufs_fragtable_other,
                                1 << (count - 1 + (uspi->s_fpb & 7)));
                if (loc == 0) {
                        ufs_error(sb, "ufs_bitmap_search",
                                  "bitmap corrupted on cg %u, start %u,"
                                  " length %u, count %u, freeoff %u\n",
                                  ucpi->c_cgx, start, length, count,
                                  ucpi->c_freeoff);
                        return INVBLOCK;
                }
                start = 0;
        }
        result = (start + length - loc) << 3;
        ucpi->c_frotor = result;

        /*
         * Found a byte that contains a suitable run; now locate the exact
         * fragment position within it.
         */

        for (end = result + 8; result < end; result += uspi->s_fpb) {
                blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
                blockmap <<= 1;
                mask = mask_arr[count];
                want = want_arr[count];
                for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
                        if ((blockmap & mask) == want) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)result);
                                return result + pos;
                        }
                        mask <<= 1;
                        want <<= 1;
                }
        }

        ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
                  ucpi->c_cgx);
        UFSD("EXIT (FAILED)\n");
        return INVBLOCK;
}

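/*
 * Update the cylinder group's cluster bitmap and cluster summary counts
 * after the block at 'blkno' is freed (cnt > 0) or allocated (cnt < 0).
 * Only meaningful when the filesystem keeps contiguity summaries
 * (s_contigsumsize > 0), as 44BSD-style filesystems do.
 */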
static void ufs_clusteracct(struct super_block * sb,
        struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
        struct ufs_sb_private_info * uspi;
        int i, start, end, forw, back;

        uspi = UFS_SB(sb)->s_uspi;
        if (uspi->s_contigsumsize <= 0)
                return;

        if (cnt > 0)
                ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
        else
                ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

        /*
         * Find the size of the cluster going forward.
         */
        start = blkno + 1;
        end = start + uspi->s_contigsumsize;
        if (end >= ucpi->c_nclusterblks)
                end = ucpi->c_nclusterblks;
        i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
        if (i > end)
                i = end;
        forw = i - start;

        /*
         * Find the size of the cluster going backward.
         */
        start = blkno - 1;
        end = start - uspi->s_contigsumsize;
        if (end < 0)
                end = -1;
        i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
        if (i < end)
                i = end;
        back = start - i;

        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;
        if (i > uspi->s_contigsumsize)
                i = uspi->s_contigsumsize;
        fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
        if (back > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
        if (forw > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

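/*
 * Free-run tables used by the ubh_scanc() scan in ufs_bitmap_search():
 * indexed by one byte of the free-fragment map, each entry is a bitmask
 * of the free-run lengths that occur in that byte.  With 8 fragments per
 * block, bit n-1 is set when the byte contains a run of exactly n free
 * fragments; the "other" table packs the same information for the smaller
 * fragments-per-block sizes, offset so that the
 * 1 << (count - 1 + (s_fpb & 7)) mask computed above selects it.
 */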
static unsigned char ufs_fragtable_8fpb[] = {
        0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
        0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
        0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};