/* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */

/* Reiserfs block (de)allocator, bitmap-based. */

#include <linux/config.h>
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/errno.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/reiserfs_fs_sb.h>
#include <linux/reiserfs_fs_i.h>
#include <linux/quotaops.h>

#define PREALLOCATION_SIZE 9
/* different reiserfs block allocator options */

#define SB_ALLOC_OPTS(s) (REISERFS_SB(s)->s_alloc_options.bits)

#define _ALLOC_concentrating_formatted_nodes 0
#define _ALLOC_displacing_large_files 1
#define _ALLOC_displacing_new_packing_localities 2
#define _ALLOC_old_hashed_relocation 3
#define _ALLOC_new_hashed_relocation 4
#define _ALLOC_skip_busy 5
#define _ALLOC_displace_based_on_dirid 6
#define _ALLOC_hashed_formatted_nodes 7
#define _ALLOC_old_way 8
#define _ALLOC_hundredth_slices 9
#define _ALLOC_dirid_groups 10
#define _ALLOC_oid_groups 11
#define _ALLOC_packing_groups 12

#define concentrating_formatted_nodes(s) test_bit(_ALLOC_concentrating_formatted_nodes, &SB_ALLOC_OPTS(s))
#define displacing_large_files(s) test_bit(_ALLOC_displacing_large_files, &SB_ALLOC_OPTS(s))
#define displacing_new_packing_localities(s) test_bit(_ALLOC_displacing_new_packing_localities, &SB_ALLOC_OPTS(s))
#define SET_OPTION(optname) \
do { \
reiserfs_warning(s, "reiserfs: option \"%s\" is set", #optname); \
set_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)); \
} while (0)

#define TEST_OPTION(optname, s) \
test_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s))
static inline void get_bit_address (struct super_block * s,
b_blocknr_t block, int * bmap_nr, int * offset)

/* The block lives in the bitmap block whose number is the block
 * number divided by the number of bits in a block. */
*bmap_nr = block / (s->s_blocksize << 3);
/* Within that bitmap block it is located at bit offset *offset. */
*offset = block & ((s->s_blocksize << 3) - 1);
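/*
 * Worked example (illustrative, not from the original source): with a
 * 4KB block size there are 4096 << 3 = 32768 bits per bitmap block, so
 * block 100000 maps to bitmap block 100000 / 32768 = 3 at bit offset
 * 100000 & 32767 = 1696.
 */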
#ifdef CONFIG_REISERFS_CHECK
int is_reusable (struct super_block * s, b_blocknr_t block, int bit_value)

if (block == 0 || block >= SB_BLOCK_COUNT (s)) {
reiserfs_warning (s, "vs-4010: is_reusable: block number is out of range %lu (%u)",
block, SB_BLOCK_COUNT (s));

/* it can't be one of the bitmap blocks */
for (i = 0; i < SB_BMAP_NR (s); i ++)
if (block == SB_AP_BITMAP (s)[i].bh->b_blocknr) {
reiserfs_warning (s, "vs-4020: is_reusable: "
"bitmap block %lu(%u) can't be freed or reused",
block, SB_BMAP_NR (s));

get_bit_address (s, block, &i, &j);

if (i >= SB_BMAP_NR (s)) {
reiserfs_warning (s, "vs-4030: is_reusable: there are not that many bitmap blocks: "
"block=%lu, bitmap_nr=%d", block, i);
if ((bit_value == 0 &&
reiserfs_test_le_bit(j, SB_AP_BITMAP(s)[i].bh->b_data)) ||
(bit_value == 1 &&
reiserfs_test_le_bit(j, SB_AP_BITMAP (s)[i].bh->b_data) == 0)) {
reiserfs_warning (s, "vs-4040: is_reusable: corresponding bit of block %lu does not "
"match required value (i==%d, j==%d) test_bit==%d",
block, i, j, reiserfs_test_le_bit (j, SB_AP_BITMAP (s)[i].bh->b_data));

if (bit_value == 0 && block == SB_ROOT_BLOCK (s)) {
reiserfs_warning (s, "vs-4050: is_reusable: this is root block (%u), "
"it must be busy", SB_ROOT_BLOCK (s));

#endif /* CONFIG_REISERFS_CHECK */
/* Searches in the journal structures for a given block number (bmap, off). If the block
   is found in the reiserfs journal it suggests the next free block candidate to test. */
static inline int is_block_in_journal (struct super_block * s, int bmap, int off, int *next)

if (reiserfs_in_journal (s, bmap, off, 1, &tmp)) {
if (tmp) { /* hint supplied */

PROC_INFO_INC( s, scan_bitmap.in_journal_hint );

(*next) = off + 1; /* inc offset to avoid looping. */
PROC_INFO_INC( s, scan_bitmap.in_journal_nohint );

PROC_INFO_INC( s, scan_bitmap.retry );
/* It searches for a window of zero bits with given minimum and maximum lengths
 * in one bitmap block. */
static int scan_bitmap_block (struct reiserfs_transaction_handle *th,
int bmap_n, int *beg, int boundary, int min, int max, int unfm)

struct super_block *s = th->t_super;
struct reiserfs_bitmap_info *bi=&SB_AP_BITMAP(s)[bmap_n];

BUG_ON (!th->t_trans_id);

RFALSE(bmap_n >= SB_BMAP_NR (s), "Bitmap %d is out of range (0..%d)",bmap_n, SB_BMAP_NR (s) - 1);
PROC_INFO_INC( s, scan_bitmap.bmap );
/* this is unclear and lacks comments, explain how journal bitmaps
   work here for the reader. Convey a sense of the design here. */
/* - I mean `a window of zero bits' as in the description of this function - Zam. */

reiserfs_warning (s, "NULL bitmap info pointer for bitmap %d", bmap_n);

if (buffer_locked (bi->bh)) {
PROC_INFO_INC( s, scan_bitmap.wait );
__wait_on_buffer (bi->bh);
if (bi->free_count < min)
return 0; // Not enough free blocks in this bitmap

/* search for the first zero bit -- the beginning of a window */
*beg = reiserfs_find_next_zero_le_bit
((unsigned long*)(bi->bh->b_data), boundary, *beg);

if (*beg + min > boundary) { /* the search for a zero bit failed, or the rest of the bitmap block
 * cannot contain a zero window of minimum size */

if (unfm && is_block_in_journal(s,bmap_n, *beg, beg))

/* first zero bit found; we check the next bits */
for (end = *beg + 1;; end ++) {
if (end >= *beg + max || end >= boundary || reiserfs_test_le_bit (end, bi->bh->b_data)) {

/* finding the other end of the zero bit window requires looking into journal structures (in
 * case of searching for free blocks for unformatted nodes) */
if (unfm && is_block_in_journal(s, bmap_n, end, &next))

/* now (*beg) points to the beginning of the zero bits window,
 * (end) points to one bit after the window end */
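/*
 * Illustrative sketch (not in the original source): if the bitmap holds the
 * bits 1 0 0 0 1 ... and *beg has been advanced to bit 1, the loop above
 * stops at end = 4 (the next set bit), so the zero window is bits [1, 4)
 * with length end - *beg = 3.
 */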
if (end - *beg >= min) { /* it seems we have found a window of the proper size */

reiserfs_prepare_for_journal (s, bi->bh, 1);
/* try to set all blocks used, checking whether they are still free */
for (i = *beg; i < end; i++) {
/* It seems that we should not check in the journal again. */
if (reiserfs_test_and_set_le_bit (i, bi->bh->b_data)) {
/* bit was set by another process
 * while we slept in prepare_for_journal() */
PROC_INFO_INC( s, scan_bitmap.stolen );
if (i >= *beg + min) { /* we can continue with a smaller set of allocated blocks,
 * if the length of this set is greater than or equal to `min' */

/* otherwise we clear all bits that were set ... */

reiserfs_test_and_clear_le_bit (i, bi->bh->b_data);
reiserfs_restore_prepared_buffer (s, bi->bh);

/* ... and search again in the current block from the beginning */

bi->free_count -= (end - *beg);
journal_mark_dirty (th, s, bi->bh);

/* free block count calculation */
reiserfs_prepare_for_journal (s, SB_BUFFER_WITH_SB(s), 1);
PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
journal_mark_dirty (th, s, SB_BUFFER_WITH_SB(s));
static int bmap_hash_id(struct super_block *s, u32 id) {
char * hash_in = NULL;

hash_in = (char *)(&id);
hash = keyed_hash(hash_in, 4);
bm = hash % SB_BMAP_NR(s);

/* this can only be true when SB_BMAP_NR = 1 */
if (bm >= SB_BMAP_NR(s))

/*
 * hashes the id and then returns > 0 if the block group for the
 * corresponding hash is full
 */
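/*
 * Worked example (illustrative, not from the original source): with 4KB
 * blocks each bitmap block tracks 32768 blocks, so the check below compares
 * the group's free_count against 32768 * 60 / 100 = 19660 blocks, i.e. a
 * group still counts as usable while more than 60% of it is free.
 */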
static inline int block_group_used(struct super_block *s, u32 id) {

bm = bmap_hash_id(s, id);
if (SB_AP_BITMAP(s)[bm].free_count > ((s->s_blocksize << 3) * 60 / 100) ) {
/*
 * the packing is returned in disk byte order
 */
__le32 reiserfs_choose_packing(struct inode *dir)

if (TEST_OPTION(packing_groups, dir->i_sb)) {
u32 parent_dir = le32_to_cpu(INODE_PKEY(dir)->k_dir_id);
/*
 * some versions of reiserfsck expect packing locality 1 to be
 * special
 */
if (parent_dir == 1 || block_group_used(dir->i_sb,parent_dir))
packing = INODE_PKEY(dir)->k_objectid;

packing = INODE_PKEY(dir)->k_dir_id;

packing = INODE_PKEY(dir)->k_objectid;
/* Tries to find a contiguous zero-bit window (of the given size) in the given
 * region of the bitmap and places new blocks there. Returns the number of
 * allocated blocks. */
static int scan_bitmap (struct reiserfs_transaction_handle *th,
b_blocknr_t *start, b_blocknr_t finish,
int min, int max, int unfm, unsigned long file_block)

struct super_block * s = th->t_super;
/* find every bm and bmap and bmap_nr in this file, and change them all to bitmap_blocknr
 * - Hans, it is not a block number - Zam. */

int off_max = s->s_blocksize << 3;

BUG_ON (!th->t_trans_id);

PROC_INFO_INC( s, scan_bitmap.call );
if ( SB_FREE_BLOCKS(s) <= 0)
return 0; // No point in looking for more free blocks

get_bit_address (s, *start, &bm, &off);
get_bit_address (s, finish, &end_bm, &end_off);
if (bm > SB_BMAP_NR(s))

if (end_bm > SB_BMAP_NR(s))
end_bm = SB_BMAP_NR(s);

/* When the bitmap is more than 10% free, anyone can allocate.
 * When it's less than 10% free, only files that already use the
 * bitmap are allowed. Once we pass 80% full, this restriction
 * is lifted.
 *
 * We do this so that files that grow later still have space close to
 * their original allocation. This improves locality, and presumably
 * performance as a result.
 *
 * This is only an allocation policy and does not make up for getting a
 * bad hint. Decent hinting must be implemented for this to work well.
 */
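/*
 * Illustrative numbers (not from the original source): with a 4KB block
 * size a bitmap block covers 32768 blocks, so the free_count test below
 * admits an allocation with no presence in a bitmap only while more than
 * 32768 / 10 = 3276 of that bitmap's blocks are still free, and the
 * skip_busy branch itself is taken only while the whole FS is more than
 * 1/20th (5%) free.
 */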
if ( TEST_OPTION(skip_busy, s) && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s)/20 ) {
for (;bm < end_bm; bm++, off = 0) {
if ( ( off && (!unfm || (file_block != 0))) || SB_AP_BITMAP(s)[bm].free_count > (s->s_blocksize << 3) / 10 )
nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);

/* we know from above that start is a reasonable number */
get_bit_address (s, *start, &bm, &off);

for (;bm < end_bm; bm++, off = 0) {
nr_allocated = scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);

nr_allocated = scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);

*start = bm * off_max + off;
static void _reiserfs_free_block (struct reiserfs_transaction_handle *th,
struct inode *inode, b_blocknr_t block, int for_unformatted)

struct super_block * s = th->t_super;
struct reiserfs_super_block * rs;
struct buffer_head * sbh;
struct reiserfs_bitmap_info *apbi;

BUG_ON (!th->t_trans_id);

PROC_INFO_INC( s, free_block );

rs = SB_DISK_SUPER_BLOCK (s);
sbh = SB_BUFFER_WITH_SB (s);
apbi = SB_AP_BITMAP(s);

get_bit_address (s, block, &nr, &offset);

if (nr >= sb_bmap_nr (rs)) {
reiserfs_warning (s, "vs-4075: reiserfs_free_block: "
"block %lu is out of range on %s",
block, reiserfs_bdevname (s));

reiserfs_prepare_for_journal(s, apbi[nr].bh, 1 ) ;

/* clear bit for the given block in bit map */
if (!reiserfs_test_and_clear_le_bit (offset, apbi[nr].bh->b_data)) {
reiserfs_warning (s, "vs-4080: reiserfs_free_block: "
"free_block (%s:%lu)[dev:blocknr]: bit already cleared",
reiserfs_bdevname (s), block);

apbi[nr].free_count ++;
journal_mark_dirty (th, s, apbi[nr].bh);

reiserfs_prepare_for_journal(s, sbh, 1) ;
/* update super block */
set_sb_free_blocks( rs, sb_free_blocks(rs) + 1 );

journal_mark_dirty (th, s, sbh);

DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
void reiserfs_free_block (struct reiserfs_transaction_handle *th,
struct inode *inode, b_blocknr_t block, int for_unformatted)

struct super_block * s = th->t_super;

BUG_ON (!th->t_trans_id);

RFALSE(!s, "vs-4061: trying to free block on nonexistent device");
RFALSE(is_reusable (s, block, 1) == 0, "vs-4071: can not free such block");
/* mark it before we clear it, just in case */
journal_mark_freed(th, s, block) ;
_reiserfs_free_block(th, inode, block, for_unformatted) ;

/* preallocated blocks don't need to be run through journal_mark_freed */
static void reiserfs_free_prealloc_block (struct reiserfs_transaction_handle *th,
struct inode *inode, b_blocknr_t block) {
RFALSE(!th->t_super, "vs-4060: trying to free block on nonexistent device");
RFALSE(is_reusable (th->t_super, block, 1) == 0, "vs-4070: can not free such block");
BUG_ON (!th->t_trans_id);
_reiserfs_free_block(th, inode, block, 1) ;
static void __discard_prealloc (struct reiserfs_transaction_handle * th,
struct reiserfs_inode_info *ei)

unsigned long save = ei->i_prealloc_block ;

struct inode *inode = &ei->vfs_inode;
BUG_ON (!th->t_trans_id);
#ifdef CONFIG_REISERFS_CHECK
if (ei->i_prealloc_count < 0)
reiserfs_warning (th->t_super, "zam-4001:%s: inode has negative prealloc blocks count.", __FUNCTION__ );

while (ei->i_prealloc_count > 0) {
reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
ei->i_prealloc_block++;
ei->i_prealloc_count --;

reiserfs_update_sd(th, inode);
ei->i_prealloc_block = save;
list_del_init(&(ei->i_prealloc_list));
/* FIXME: this should be an inline function */
void reiserfs_discard_prealloc (struct reiserfs_transaction_handle *th,
struct inode *inode)

struct reiserfs_inode_info *ei = REISERFS_I(inode);
BUG_ON (!th->t_trans_id);
if (ei->i_prealloc_count)
__discard_prealloc(th, ei);

void reiserfs_discard_all_prealloc (struct reiserfs_transaction_handle *th)

struct list_head * plist = &SB_JOURNAL(th->t_super)->j_prealloc_list;

BUG_ON (!th->t_trans_id);

while (!list_empty(plist)) {
struct reiserfs_inode_info *ei;
ei = list_entry(plist->next, struct reiserfs_inode_info, i_prealloc_list);
#ifdef CONFIG_REISERFS_CHECK
if (!ei->i_prealloc_count) {
reiserfs_warning (th->t_super, "zam-4001:%s: inode is in prealloc list but has no preallocated blocks.", __FUNCTION__);

__discard_prealloc(th, ei);
void reiserfs_init_alloc_options (struct super_block *s)

set_bit (_ALLOC_skip_busy, &SB_ALLOC_OPTS(s));
set_bit (_ALLOC_dirid_groups, &SB_ALLOC_OPTS(s));
set_bit (_ALLOC_packing_groups, &SB_ALLOC_OPTS(s));
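/*
 * Worked example (illustrative, not from the original source): the three
 * defaults above set bits 5 (skip_busy), 10 (dirid_groups) and 12
 * (packing_groups), so after initialization SB_ALLOC_OPTS(s) holds
 * 0x20 | 0x400 | 0x1000 = 0x1420.
 */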
/* block allocator related options are parsed here */
int reiserfs_parse_alloc_options(struct super_block * s, char * options)

char * this_char, * value;

REISERFS_SB(s)->s_alloc_options.bits = 0; /* clear default settings */

while ( (this_char = strsep (&options, ":")) != NULL ) {
if ((value = strchr (this_char, '=')) != NULL)

if (!strcmp(this_char, "concentrating_formatted_nodes")) {

SET_OPTION(concentrating_formatted_nodes);
temp = (value && *value) ? simple_strtoul (value, &value, 0) : 10;
if (temp <= 0 || temp > 100) {
REISERFS_SB(s)->s_alloc_options.border = 10;

REISERFS_SB(s)->s_alloc_options.border = 100 / temp;
if (!strcmp(this_char, "displacing_large_files")) {
SET_OPTION(displacing_large_files);
REISERFS_SB(s)->s_alloc_options.large_file_size =
(value && *value) ? simple_strtoul (value, &value, 0) : 16;

if (!strcmp(this_char, "displacing_new_packing_localities")) {
SET_OPTION(displacing_new_packing_localities);

if (!strcmp(this_char, "old_hashed_relocation")) {
SET_OPTION(old_hashed_relocation);

if (!strcmp(this_char, "new_hashed_relocation")) {
SET_OPTION(new_hashed_relocation);

if (!strcmp(this_char, "dirid_groups")) {
SET_OPTION(dirid_groups);

if (!strcmp(this_char, "oid_groups")) {
SET_OPTION(oid_groups);

if (!strcmp(this_char, "packing_groups")) {
SET_OPTION(packing_groups);

if (!strcmp(this_char, "hashed_formatted_nodes")) {
SET_OPTION(hashed_formatted_nodes);

if (!strcmp(this_char, "skip_busy")) {
SET_OPTION(skip_busy);

if (!strcmp(this_char, "hundredth_slices")) {
SET_OPTION(hundredth_slices);

if (!strcmp(this_char, "old_way")) {

if (!strcmp(this_char, "displace_based_on_dirid")) {
SET_OPTION(displace_based_on_dirid);

if (!strcmp(this_char, "preallocmin")) {
REISERFS_SB(s)->s_alloc_options.preallocmin =
(value && *value) ? simple_strtoul (value, &value, 0) : 4;

if (!strcmp(this_char, "preallocsize")) {
REISERFS_SB(s)->s_alloc_options.preallocsize =
(value && *value) ? simple_strtoul (value, &value, 0) : PREALLOCATION_SIZE;

reiserfs_warning (s, "zam-4001: %s : unknown option - %s",
__FUNCTION__ , this_char);

reiserfs_warning (s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
static inline void new_hashed_relocation (reiserfs_blocknr_hint_t * hint)

if (hint->formatted_node) {
hash_in = (char*)&hint->key.k_dir_id;

//hint->search_start = hint->beg;
hash_in = (char*)&hint->key.k_dir_id;

if ( TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);

hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid);

hint->search_start = hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
/*
 * Relocation based on dirid: files are hashed by dirid into a given
 * bitmap block group. Formatted nodes are unaffected; a separate policy
 * covers them.
 */
dirid_groups (reiserfs_blocknr_hint_t *hint)

struct super_block *sb = hint->th->t_super;

dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
else if (hint->formatted_node)
dirid = hint->key.k_dir_id;

bm = bmap_hash_id(sb, dirid);
hash = bm * (sb->s_blocksize << 3);
/* give a portion of the block group to metadata */

hash += sb->s_blocksize/2;
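/*
 * Illustrative arithmetic (not from the original source): hash is a bit
 * (block) offset, so with a 4KB block size the s_blocksize/2 bump above
 * skips the first 2048 blocks of the 32768-block group, i.e. roughly 1/16
 * of the group is left in front of the search start for metadata.
 */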
hint->search_start = hash;

/*
 * Relocation based on oid: files are hashed by objectid into a given
 * bitmap block group. Formatted nodes are unaffected; a separate policy
 * covers them.
 */
oid_groups (reiserfs_blocknr_hint_t *hint)

dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);

/* keep the root dir and its first set of subdirs close to
 * the start of the disk */

hash = (hint->inode->i_sb->s_blocksize << 3);

oid = le32_to_cpu(INODE_PKEY(hint->inode)->k_objectid);
bm = bmap_hash_id(hint->inode->i_sb, oid);
hash = bm * (hint->inode->i_sb->s_blocksize << 3);

hint->search_start = hash;
/* returns 1 if it finds an indirect item and gets valid hint info
 * from it, otherwise 0
 */
static int get_left_neighbor(reiserfs_blocknr_hint_t *hint)

struct buffer_head * bh;
struct item_head * ih;

if (!hint->path) /* reiserfs code can call this function without a pointer to the path
 * structure supplied; then we rely on the supplied search_start */

bh = get_last_bh(path);
RFALSE( !bh, "green-4002: Illegal path specified to get_left_neighbor");

pos_in_item = path->pos_in_item;
item = get_item (path);

hint->search_start = bh->b_blocknr;

if (!hint->formatted_node && is_indirect_le_ih (ih)) {
/* for an indirect item: go left and look for the first non-hole entry
   in the indirect item */
if (pos_in_item == I_UNFM_NUM (ih))

// pos_in_item = I_UNFM_NUM (ih) - 1;
while (pos_in_item >= 0) {
int t=get_block_num(item,pos_in_item);

hint->search_start = t;

/* does the result value fit into the specified region? */
/* If this is a formatted node, try to put it on the first part of the device,
   specified as a percentage with the mount option; otherwise try to put it on
   the last part of the device. This is not to say it is good code to do so,
   but the effect should be measured. */
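/*
 * Worked example (illustrative, not from the original source): mounting
 * with the concentrating_formatted_nodes=10 allocator option stores
 * s_alloc_options.border = 100/10 = 10, so on a 1,000,000-block FS the
 * function below confines formatted nodes to blocks 0 .. 1,000,000/10 - 1
 * = 99,999.
 */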
static inline void set_border_in_hint(struct super_block *s, reiserfs_blocknr_hint_t *hint)

b_blocknr_t border = SB_BLOCK_COUNT(s) / REISERFS_SB(s)->s_alloc_options.border;

if (hint->formatted_node)
hint->end = border - 1;

static inline void displace_large_file(reiserfs_blocknr_hint_t *hint)

if ( TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
hint->search_start = hint->beg + keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_dir_id), 4) % (hint->end - hint->beg);

hint->search_start = hint->beg + keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_objectid), 4) % (hint->end - hint->beg);

static inline void hash_formatted_node(reiserfs_blocknr_hint_t *hint)

hash_in = (char*)&hint->key.k_dir_id;
else if ( TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);

hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid);

hint->search_start = hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
static inline int this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t *hint)

return hint->block == REISERFS_SB(hint->th->t_super)->s_alloc_options.large_file_size;

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
static inline void displace_new_packing_locality (reiserfs_blocknr_hint_t *hint)

struct in_core_key * key = &hint->key;

hint->th->displace_new_blocks = 0;
hint->search_start = hint->beg + keyed_hash((char*)(&key->k_objectid),4) % (hint->end - hint->beg);

static inline int old_hashed_relocation (reiserfs_blocknr_hint_t * hint)

if (hint->formatted_node || hint->inode == NULL) {

hash_in = le32_to_cpu((INODE_PKEY(hint->inode))->k_dir_id);
border = hint->beg + (u32) keyed_hash(((char *) (&hash_in)), 4) % (hint->end - hint->beg - 1);
if (border > hint->search_start)
hint->search_start = border;

static inline int old_way (reiserfs_blocknr_hint_t * hint)

if (hint->formatted_node || hint->inode == NULL) {

border = hint->beg + le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id) % (hint->end - hint->beg);
if (border > hint->search_start)
hint->search_start = border;

static inline void hundredth_slices (reiserfs_blocknr_hint_t * hint)

struct in_core_key * key = &hint->key;
b_blocknr_t slice_start;

slice_start = (keyed_hash((char*)(&key->k_dir_id),4) % 100) * (hint->end / 100);
if ( slice_start > hint->search_start || slice_start + (hint->end / 100) <= hint->search_start) {
hint->search_start = slice_start;
static void determine_search_start(reiserfs_blocknr_hint_t *hint,

struct super_block *s = hint->th->t_super;

hint->end = SB_BLOCK_COUNT(s) - 1;

/* This is the former border algorithm, now with a tunable border offset */
if (concentrating_formatted_nodes(s))
set_border_in_hint(s, hint);

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
/* whenever we create a new directory, we displace it. At first we will
   hash for location, later we might look for a moderately empty place for
   it */
if (displacing_new_packing_localities(s)
&& hint->th->displace_new_blocks) {
displace_new_packing_locality(hint);

/* we do not continue determine_search_start
 * if the new packing locality is being displaced */
/* all persons should feel encouraged to add more special cases here and
 * test them */

if (displacing_large_files(s) && !hint->formatted_node
&& this_blocknr_allocation_would_make_it_a_large_file(hint)) {
displace_large_file(hint);

/* if none of our special cases is relevant, use the left neighbor in the
   tree order of the new node we are allocating for */
if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes,s)) {
hash_formatted_node(hint);

unfm_hint = get_left_neighbor(hint);

/* Mimic the old block allocator behaviour: if the VFS allowed preallocation,
   new blocks are displaced based on the directory ID. Also, if the suggested
   search_start is less than the last preallocated block, we start searching
   from it, assuming that HDD data flow is faster in the forward direction. */
if ( TEST_OPTION(old_way, s)) {
if (!hint->formatted_node) {
if ( !reiserfs_hashed_relocation(s))

else if (!reiserfs_no_unhashed_relocation(s))
old_hashed_relocation(hint);

if ( hint->inode && hint->search_start < REISERFS_I(hint->inode)->i_prealloc_block)
hint->search_start = REISERFS_I(hint->inode)->i_prealloc_block;

/* This is an approach proposed by Hans */
if ( TEST_OPTION(hundredth_slices, s) && ! (displacing_large_files(s) && !hint->formatted_node)) {
hundredth_slices(hint);

/* old_hashed_relocation only works on unformatted */
if (!unfm_hint && !hint->formatted_node &&
TEST_OPTION(old_hashed_relocation, s))

old_hashed_relocation(hint);

/* new_hashed_relocation works with both formatted/unformatted nodes */
if ((!unfm_hint || hint->formatted_node) &&
TEST_OPTION(new_hashed_relocation, s))

new_hashed_relocation(hint);

/* dirid grouping works only on unformatted nodes */
if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups,s))

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
if (hint->formatted_node && TEST_OPTION(dirid_groups,s))

/* oid grouping works only on unformatted nodes */
if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups,s))
static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)

/* make minimum size a mount option and benchmark both ways */
/* we preallocate blocks only for regular files, specific size */
/* benchmark preallocating always and see what happens */
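/*
 * Illustrative defaults (not from the original source): if the preallocmin
 * and preallocsize options are given without values, the parser above stores
 * 4 and PREALLOCATION_SIZE = 9, so a regular file of at least 4 blocks gets
 * hint->prealloc_size = 9 - 1 = 8 extra blocks requested on top of each
 * allocation.
 */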
hint->prealloc_size = 0;

if (!hint->formatted_node && hint->preallocate) {
if (S_ISREG(hint->inode->i_mode)
&& hint->inode->i_size >= REISERFS_SB(hint->th->t_super)->s_alloc_options.preallocmin * hint->inode->i_sb->s_blocksize)
hint->prealloc_size = REISERFS_SB(hint->th->t_super)->s_alloc_options.preallocsize - 1;
/* XXX I know it could be merged with the upper-level function;
   but maybe the resulting function would be too complex. */
static inline int allocate_without_wrapping_disk (reiserfs_blocknr_hint_t * hint,
b_blocknr_t * new_blocknrs,
b_blocknr_t start, b_blocknr_t finish,

int amount_needed, int prealloc_size)

int rest = amount_needed;

while (rest > 0 && start <= finish) {
nr_allocated = scan_bitmap (hint->th, &start, finish, min,
rest + prealloc_size, !hint->formatted_node,

if (nr_allocated == 0) /* no new blocks allocated, return */

/* fill the free_blocknrs array first */
while (rest > 0 && nr_allocated > 0) {
* new_blocknrs ++ = start ++;
rest --; nr_allocated --;

/* do we have something to fill the preallocation array with as well? */
if (nr_allocated > 0) {
/* it means prealloc_size was greater than 0 and we are doing preallocation */
list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
&SB_JOURNAL(hint->th->t_super)->j_prealloc_list);
REISERFS_I(hint->inode)->i_prealloc_block = start;
REISERFS_I(hint->inode)->i_prealloc_count = nr_allocated;

return (amount_needed - rest);
static inline int blocknrs_and_prealloc_arrays_from_search_start
(reiserfs_blocknr_hint_t *hint, b_blocknr_t *new_blocknrs, int amount_needed)

struct super_block *s = hint->th->t_super;
b_blocknr_t start = hint->search_start;
b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;

int nr_allocated = 0;

determine_prealloc_size(hint);
if (!hint->formatted_node) {

#ifdef REISERQUOTA_DEBUG
reiserfs_debug (s, REISERFS_DEBUG_CODE, "reiserquota: allocating %d blocks id=%u", amount_needed, hint->inode->i_uid);

quota_ret = DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed);
if (quota_ret) /* Quota exceeded? */
return QUOTA_EXCEEDED;
if (hint->preallocate && hint->prealloc_size ) {
#ifdef REISERQUOTA_DEBUG
reiserfs_debug (s, REISERFS_DEBUG_CODE, "reiserquota: allocating (prealloc) %d blocks id=%u", hint->prealloc_size, hint->inode->i_uid);

quota_ret = DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode, hint->prealloc_size);

hint->preallocate=hint->prealloc_size=0;

/* for unformatted nodes, force large allocations */
bigalloc = amount_needed;

/* in bigalloc mode, nr_allocated should stay zero until
 * the entire allocation is filled
 */
if (unlikely(bigalloc && nr_allocated)) {
reiserfs_warning(s, "bigalloc is %d, nr_allocated %d\n",
bigalloc, nr_allocated);
/* reset things to a sane value */
bigalloc = amount_needed - nr_allocated;

/*
 * try pass 0 and pass 1 looking for a nice big
 * contiguous allocation. Then reset and look
 * for anything you can find.
 */
if (passno == 2 && bigalloc) {

case 0: /* Search from hint->search_start to end of disk */
start = hint->search_start;
finish = SB_BLOCK_COUNT(s) - 1;

case 1: /* Search from hint->beg to hint->search_start */

finish = hint->search_start;

case 2: /* Last chance: Search from 0 to hint->beg */

default: /* We've tried searching everywhere, not enough space */
/* Free the blocks */
if (!hint->formatted_node) {
#ifdef REISERQUOTA_DEBUG
reiserfs_debug (s, REISERFS_DEBUG_CODE, "reiserquota: freeing (nospace) %d blocks id=%u", amount_needed + hint->prealloc_size - nr_allocated, hint->inode->i_uid);

DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); /* Free not allocated blocks */

while (nr_allocated --)
reiserfs_free_block(hint->th, hint->inode, new_blocknrs[nr_allocated], !hint->formatted_node);

return NO_DISK_SPACE;

} while ((nr_allocated += allocate_without_wrapping_disk (hint,
new_blocknrs + nr_allocated, start, finish,
bigalloc ? bigalloc : 1,
amount_needed - nr_allocated,
hint->prealloc_size))
if ( !hint->formatted_node &&
amount_needed + hint->prealloc_size >
nr_allocated + REISERFS_I(hint->inode)->i_prealloc_count) {
/* Some of the preallocation blocks were not allocated */
#ifdef REISERQUOTA_DEBUG
reiserfs_debug (s, REISERFS_DEBUG_CODE, "reiserquota: freeing (failed prealloc) %d blocks id=%u", amount_needed + hint->prealloc_size - nr_allocated - REISERFS_I(hint->inode)->i_prealloc_count, hint->inode->i_uid);

DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed +
hint->prealloc_size - nr_allocated -
REISERFS_I(hint->inode)->i_prealloc_count);
/* grab new blocknrs from preallocated list */
/* return amount still needed after using them */
static int use_preallocated_list_if_available (reiserfs_blocknr_hint_t *hint,
b_blocknr_t *new_blocknrs, int amount_needed)

struct inode * inode = hint->inode;

if (REISERFS_I(inode)->i_prealloc_count > 0) {
while (amount_needed) {

*new_blocknrs ++ = REISERFS_I(inode)->i_prealloc_block ++;
REISERFS_I(inode)->i_prealloc_count --;

if (REISERFS_I(inode)->i_prealloc_count <= 0) {
list_del(&REISERFS_I(inode)->i_prealloc_list);

/* return amount still needed after using preallocated blocks */
return amount_needed;
int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *hint,
b_blocknr_t * new_blocknrs, int amount_needed,
int reserved_by_us /* Number of blocks we have
already reserved */)

int initial_amount_needed = amount_needed;

struct super_block *s = hint->th->t_super;

/* Check if there is enough space, taking into account reserved space */
if ( SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks <
amount_needed - reserved_by_us)
return NO_DISK_SPACE;
/* should this be if !hint->inode && hint->preallocate? */
/* do you mean hint->formatted_node can be removed ? - Zam */
/* hint->formatted_node cannot be removed because we try to access
   inode information here, and there is often no inode associated with
   metadata allocations - green */

if (!hint->formatted_node && hint->preallocate) {
amount_needed = use_preallocated_list_if_available
(hint, new_blocknrs, amount_needed);
if (amount_needed == 0) /* we got all the blocknrs we need from the
                           prealloc list */

new_blocknrs += (initial_amount_needed - amount_needed);
/* find the search start and save it in the hint structure */
determine_search_start(hint, amount_needed);
if (hint->search_start >= SB_BLOCK_COUNT(s))
hint->search_start = SB_BLOCK_COUNT(s) - 1;

/* allocation itself; fill the new_blocknrs and preallocation arrays */
ret = blocknrs_and_prealloc_arrays_from_search_start
(hint, new_blocknrs, amount_needed);

/* we used the prealloc list to fill (partially) the new_blocknrs array. If the final allocation
 * fails we need to return blocks back to the prealloc list or just free them. -- Zam (I chose the
 * second variant) */

if (ret != CARRY_ON) {
while (amount_needed ++ < initial_amount_needed) {
reiserfs_free_block(hint->th, hint->inode, *(--new_blocknrs), 1);
/* These two functions provide block reservation to the rest of the kernel. */
/* Reserve @blocks blocks in the fs pointed to by @sb. The caller must make sure
   there are actually this many blocks available on the FS. */
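/*
 * Hedged usage sketch (not part of the original file): a caller that knows
 * it will soon need `blocks` blocks could pair the two calls like this
 * (do_the_write is a hypothetical placeholder):
 *
 *	reiserfs_claim_blocks_to_be_allocated(sb, blocks);
 *	ret = do_the_write(sb, blocks);              // allocate some or all of them
 *	reiserfs_release_claimed_blocks(sb, blocks); // always drop the claim
 *
 * The claim only adjusts REISERFS_SB(sb)->reserved_blocks so that other
 * callers see a smaller free-block budget; it does not pin specific blocks.
 */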
void reiserfs_claim_blocks_to_be_allocated(
struct super_block *sb, /* super block of the
                           filesystem where blocks
                           should be reserved */
int blocks /* How many to reserve */

/* Fast case: if the reservation is zero, exit immediately. */

spin_lock(&REISERFS_SB(sb)->bitmap_lock);
REISERFS_SB(sb)->reserved_blocks += blocks;
spin_unlock(&REISERFS_SB(sb)->bitmap_lock);

/* Unreserve @blocks blocks in the fs pointed to by @sb */
void reiserfs_release_claimed_blocks(
struct super_block *sb, /* super block of the
                           filesystem where blocks
                           should be unreserved */
int blocks /* How many to unreserve */

/* Fast case: if the unreservation is zero, exit immediately. */

spin_lock(&REISERFS_SB(sb)->bitmap_lock);
REISERFS_SB(sb)->reserved_blocks -= blocks;
spin_unlock(&REISERFS_SB(sb)->bitmap_lock);
RFALSE( REISERFS_SB(sb)->reserved_blocks < 0, "amount of blocks reserved became negative?");
/* This function estimates how many pages we will be able to write to the FS;
   it is used for reiserfs_file_write() purposes for now. */
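/*
 * Worked example (illustrative, not from the original source): with 4KB
 * pages (PAGE_CACHE_SHIFT = 12) and a 1KB block size (s_blocksize_bits = 10)
 * the shift below is 2, so 1000 unreserved free blocks fit 1000 >> 2 = 250
 * pages; with a 4KB block size the shift is 0 and blocks map 1:1 to pages.
 */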
int reiserfs_can_fit_pages ( struct super_block *sb /* superblock of filesystem
                               to estimate space */ )

spin_lock(&REISERFS_SB(sb)->bitmap_lock);
space = (SB_FREE_BLOCKS(sb) - REISERFS_SB(sb)->reserved_blocks) >> ( PAGE_CACHE_SHIFT - sb->s_blocksize_bits);
spin_unlock(&REISERFS_SB(sb)->bitmap_lock);

return space > 0 ? space : 0;