/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * metadata alloc and free
 * Inspired by ext3 block groups.
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>

#include "localalloc.h"

#include "buffer_head_io.h"
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
                                  struct inode *alloc_inode,
                                  struct buffer_head *bg_bh,
                                  struct ocfs2_chain_list *cl);
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
                                   struct inode *alloc_inode,
                                   struct buffer_head *bh);

static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
                                       struct ocfs2_alloc_context *ac);

static int ocfs2_cluster_group_search(struct inode *inode,
                                      struct buffer_head *group_bh,
                                      u32 bits_wanted, u32 min_bits,
                                      u16 *bit_off, u16 *bits_found);
static int ocfs2_block_group_search(struct inode *inode,
                                    struct buffer_head *group_bh,
                                    u32 bits_wanted, u32 min_bits,
                                    u16 *bit_off, u16 *bits_found);
static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
                              unsigned int *num_bits,
static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
                                     struct ocfs2_alloc_context *ac,
                                     unsigned int *num_bits,
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
                                             struct buffer_head *bg_bh,
                                             unsigned int bits_wanted,
static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
                                             struct inode *alloc_inode,
                                             struct ocfs2_group_desc *bg,
                                             struct buffer_head *group_bh,
                                             unsigned int num_bits);
static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle,
                                               struct inode *alloc_inode,
                                               struct ocfs2_group_desc *bg,
                                               struct buffer_head *group_bh,
                                               unsigned int bit_off,
                                               unsigned int num_bits);
static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle,
                                    struct inode *alloc_inode,
                                    struct buffer_head *fe_bh,
                                    struct buffer_head *bg_bh,
                                    struct buffer_head *prev_bg_bh,
static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
                                    struct inode *alloc_inode,
                                    struct buffer_head *alloc_bh,
                                    unsigned int start_bit,
static inline u64 ocfs2_which_suballoc_group(u64 block,
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
static inline u64 ocfs2_which_cluster_group(struct inode *inode,
static inline void ocfs2_block_to_cluster_group(struct inode *inode,
void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)

static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)

        return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
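/*
 * Illustrative sketch only (not built): the helper and numbers below
 * are hypothetical, not taken from any particular volume. With a chain
 * list describing cl_cpg = 4 clusters per group and cl_bpc = 8 bits per
 * cluster, ocfs2_bits_per_group() would report 4 * 8 = 32 bits, i.e.
 * each group descriptor bitmap tracks 32 allocation units.
 */
#if 0
static unsigned int example_bits_per_group(unsigned int cpg, unsigned int bpc)
{
        /* same arithmetic as ocfs2_bits_per_group(), minus the
         * little-endian conversions */
        return cpg * bpc;       /* e.g. 4 * 8 == 32 */
}
#endif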
static int ocfs2_block_group_fill(struct ocfs2_journal_handle *handle,
                                  struct inode *alloc_inode,
                                  struct buffer_head *bg_bh,
                                  struct ocfs2_chain_list *cl)

        struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
        struct super_block *sb = alloc_inode->i_sb;

        if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
                ocfs2_error(alloc_inode->i_sb, "group block (%"MLFu64") "
                            "!= b_blocknr (%llu)", group_blkno,
                            (unsigned long long) bg_bh->b_blocknr);

        status = ocfs2_journal_access(handle,
                                      OCFS2_JOURNAL_ACCESS_CREATE);

        memset(bg, 0, sb->s_blocksize);
        strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
        bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
        bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb));
        bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
        bg->bg_chain = cpu_to_le16(my_chain);
        bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
        bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
        bg->bg_blkno = cpu_to_le64(group_blkno);
        /* set the 1st bit in the bitmap to account for the descriptor block */
        ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
        bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);

        status = ocfs2_journal_dirty(handle, bg_bh);

        /* There is no need to zero out or otherwise initialize the
         * other blocks in a group - All valid FS metadata in a block
         * group stores the superblock fs_generation value at
         * allocation time. */
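/*
 * Rough sketch of the idea in the comment above (not built; the struct
 * and helper below are generic stand-ins, not the on-disk OCFS2
 * format): a block stamped with an old generation is simply treated as
 * garbage, so freshly added group blocks never need to be pre-zeroed.
 */
#if 0
struct example_meta_block {
        unsigned int generation;        /* copied from the superblock at alloc time */
};

static int example_block_is_current(struct example_meta_block *blk,
                                    unsigned int sb_generation)
{
        /* stale or uninitialized blocks fail this check */
        return blk->generation == sb_generation;
}
#endif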
static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)

        while (curr < le16_to_cpu(cl->cl_count)) {
                if (le32_to_cpu(cl->cl_recs[best].c_total) >
                    le32_to_cpu(cl->cl_recs[curr].c_total))

/*
 * We expect the block group allocator to already be locked.
 */
static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
                                   struct inode *alloc_inode,
                                   struct buffer_head *bh)

        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
        struct ocfs2_chain_list *cl;
        struct ocfs2_alloc_context *ac = NULL;
        struct ocfs2_journal_handle *handle = NULL;
        u32 bit_off, num_bits;
        struct buffer_head *bg_bh = NULL;
        struct ocfs2_group_desc *bg;

        BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));

        handle = ocfs2_alloc_handle(osb);

        cl = &fe->id2.i_chain;
        status = ocfs2_reserve_clusters(osb,
                                        le16_to_cpu(cl->cl_cpg),
                if (status != -ENOSPC)

        credits = ocfs2_calc_group_alloc_credits(osb->sb,
                                                 le16_to_cpu(cl->cl_cpg));
        handle = ocfs2_start_trans(osb, handle, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);

        status = ocfs2_claim_clusters(osb,
                                      le16_to_cpu(cl->cl_cpg),
                if (status != -ENOSPC)

        alloc_rec = ocfs2_find_smallest_chain(cl);

        /* setup the group */
        bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
        mlog(0, "new descriptor, record %u, at block %"MLFu64"\n",
             alloc_rec, bg_blkno);

        bg_bh = sb_getblk(osb->sb, bg_blkno);

        ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);

        status = ocfs2_block_group_fill(handle,

        bg = (struct ocfs2_group_desc *) bg_bh->b_data;

        status = ocfs2_journal_access(handle, alloc_inode,
                                      bh, OCFS2_JOURNAL_ACCESS_WRITE);

        le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
                     le16_to_cpu(bg->bg_free_bits_count));
        le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
        cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg_blkno);
        if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
                le16_add_cpu(&cl->cl_next_free_rec, 1);

        le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
                     le16_to_cpu(bg->bg_free_bits_count));
        le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
        le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));

        status = ocfs2_journal_dirty(handle, bh);

        spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
        OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
        fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
                                                         le32_to_cpu(fe->i_clusters)));
        spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
        i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
        alloc_inode->i_blocks =
                ocfs2_align_bytes_to_sectors(i_size_read(alloc_inode));

        ocfs2_commit_trans(handle);

        ocfs2_free_alloc_context(ac);
static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
                                       struct ocfs2_alloc_context *ac)

        u32 bits_wanted = ac->ac_bits_wanted;
        struct inode *alloc_inode = ac->ac_inode;
        struct buffer_head *bh = NULL;
        struct ocfs2_journal_handle *handle = ac->ac_handle;
        struct ocfs2_dinode *fe;

        BUG_ON(handle->flags & OCFS2_HANDLE_STARTED);

        ocfs2_handle_add_inode(handle, alloc_inode);
        status = ocfs2_meta_lock(alloc_inode, handle, &bh, 1);

        fe = (struct ocfs2_dinode *) bh->b_data;
        if (!OCFS2_IS_VALID_DINODE(fe)) {
                OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);

        if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
                ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator "
                            "# %"MLFu64, le64_to_cpu(fe->i_blkno));

        free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
                    le32_to_cpu(fe->id1.bitmap1.i_used);

        if (bits_wanted > free_bits) {
                /* cluster bitmap never grows */
                if (ocfs2_is_cluster_bitmap(alloc_inode)) {
                        mlog(0, "Disk Full: wanted=%u, free_bits=%u\n",
                             bits_wanted, free_bits);

                status = ocfs2_block_group_alloc(osb, alloc_inode, bh);
                        if (status != -ENOSPC)

                atomic_inc(&osb->alloc_stats.bg_extends);

        /* You should never ask for this much metadata */
               (le32_to_cpu(fe->id1.bitmap1.i_total)
                - le32_to_cpu(fe->id1.bitmap1.i_used)));
int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
                               struct ocfs2_journal_handle *handle,
                               struct ocfs2_dinode *fe,
                               struct ocfs2_alloc_context **ac)

        struct inode *alloc_inode = NULL;

        *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);

        (*ac)->ac_bits_wanted = ocfs2_extend_meta_needed(fe);
        (*ac)->ac_handle = handle;
        (*ac)->ac_which = OCFS2_AC_USE_META;

#ifndef OCFS2_USE_ALL_METADATA_SUBALLOCATORS
        alloc_inode = ocfs2_get_system_file_inode(osb,
                                                  EXTENT_ALLOC_SYSTEM_INODE,
        alloc_inode = ocfs2_get_system_file_inode(osb,
                                                  EXTENT_ALLOC_SYSTEM_INODE,

        (*ac)->ac_inode = igrab(alloc_inode);
        (*ac)->ac_group_search = ocfs2_block_group_search;

        status = ocfs2_reserve_suballoc_bits(osb, (*ac));
                if (status != -ENOSPC)

        if ((status < 0) && *ac) {
                ocfs2_free_alloc_context(*ac);

int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
                            struct ocfs2_journal_handle *handle,
                            struct ocfs2_alloc_context **ac)

        struct inode *alloc_inode = NULL;

        *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);

        (*ac)->ac_bits_wanted = 1;
        (*ac)->ac_handle = handle;
        (*ac)->ac_which = OCFS2_AC_USE_INODE;

        alloc_inode = ocfs2_get_system_file_inode(osb,
                                                  INODE_ALLOC_SYSTEM_INODE,

        (*ac)->ac_inode = igrab(alloc_inode);
        (*ac)->ac_group_search = ocfs2_block_group_search;

        status = ocfs2_reserve_suballoc_bits(osb, *ac);
                if (status != -ENOSPC)

        if ((status < 0) && *ac) {
                ocfs2_free_alloc_context(*ac);
/* local alloc code has to do the same thing, so rather than do this
int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
                                      struct ocfs2_alloc_context *ac)

        ac->ac_inode = ocfs2_get_system_file_inode(osb,
                                                   GLOBAL_BITMAP_SYSTEM_INODE,
                mlog(ML_ERROR, "Could not get bitmap inode!\n");

        ac->ac_which = OCFS2_AC_USE_MAIN;
        ac->ac_group_search = ocfs2_cluster_group_search;

        status = ocfs2_reserve_suballoc_bits(osb, ac);
        if (status < 0 && status != -ENOSPC)

/* Callers don't need to care which bitmap (local alloc or main) to
 * use so we figure it out for them, but unfortunately this clutters
int ocfs2_reserve_clusters(struct ocfs2_super *osb,
                           struct ocfs2_journal_handle *handle,
                           struct ocfs2_alloc_context **ac)

        *ac = kcalloc(1, sizeof(struct ocfs2_alloc_context), GFP_KERNEL);

        (*ac)->ac_bits_wanted = bits_wanted;
        (*ac)->ac_handle = handle;

        if (ocfs2_alloc_should_use_local(osb, bits_wanted)) {
                status = ocfs2_reserve_local_alloc_bits(osb,

                if ((status < 0) && (status != -ENOSPC)) {

                } else if (status == -ENOSPC) {
                        /* reserve_local_bits will return enospc with
                         * the local alloc inode still locked, so we
                         * can change this safely here. */
                        mlog(0, "Disabling local alloc\n");
                        /* We set to OCFS2_LA_DISABLED so that umount
                         * can clean up what's left of the local
                        osb->local_alloc_state = OCFS2_LA_DISABLED;

        if (status == -ENOSPC) {
                status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
                        if (status != -ENOSPC)

        if ((status < 0) && *ac) {
                ocfs2_free_alloc_context(*ac);
/*
 * More or less lifted from ext3. I'll leave their description below:
 *
 * "For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy. This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 *
 * Note: OCFS2 already does this differently for metadata vs data
 * allocations, as those bitmaps are separate and undo access is never
 * called on a metadata group descriptor.
 */
static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,

        struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;

        if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))

        if (!buffer_jbd(bg_bh) || !bh2jh(bg_bh)->b_committed_data)

        bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
        return !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
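/*
 * Condensed restatement of the rule above (illustrative only; the two
 * "bitmaps" are plain arrays here, not journalled buffer heads): a bit
 * may be handed out only if it is clear in the live bitmap *and* in the
 * last-committed copy, otherwise a crash between reusing the bit and
 * committing the delete could corrupt the old data.
 */
#if 0
static int example_bit_allocatable(const unsigned long *live,
                                   const unsigned long *committed,
                                   int nr)
{
        if (test_bit(nr, live))
                return 0;       /* already in use */
        if (committed && test_bit(nr, committed))
                return 0;       /* freed, but the free isn't on disk yet */
        return 1;
}
#endif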
static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
                                             struct buffer_head *bg_bh,
                                             unsigned int bits_wanted,

        u16 best_offset, best_size;
        int offset, start, found, status = 0;
        struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;

        if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
                OCFS2_RO_ON_INVALID_GROUP_DESC(osb->sb, bg);

        found = start = best_offset = best_size = 0;
        bitmap = bg->bg_bitmap;

        while ((offset = ocfs2_find_next_zero_bit(bitmap,
                                                  le16_to_cpu(bg->bg_bits),
                if (offset == le16_to_cpu(bg->bg_bits))

                if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
                        /* We found a zero, but we can't use it as it
                         * hasn't been put to disk yet! */
                } else if (offset == start) {
                        /* we found a zero */
                        /* move start to the next bit to test */
                        /* got a zero after some ones */

                if (found > best_size) {
                        best_offset = start - found;
                /* we got everything we needed */
                if (found == bits_wanted) {
                        /* mlog(0, "Found it all!\n"); */

        /* XXX: I think the first clause is equivalent to the second
        if (found == bits_wanted) {
                *bit_off = start - found;
        } else if (best_size) {
                *bit_off = best_offset;
                *bits_found = best_size;
        /* No error log here -- see the comment above
         * ocfs2_test_bg_bit_allocatable */
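/*
 * Simplified standalone version of the window search above
 * (illustrative only; the "committed data" filtering done by
 * ocfs2_test_bg_bit_allocatable() is omitted here): walk the bitmap,
 * track the current run of clear bits, and remember the longest run
 * seen so far, stopping early once the run reaches the wanted size.
 */
#if 0
static int example_best_zero_run(const unsigned long *bitmap, int nbits,
                                 int wanted, int *off, int *len)
{
        int i, run = 0, best_off = 0, best_len = 0;

        for (i = 0; i < nbits; i++) {
                if (test_bit(i, bitmap)) {
                        run = 0;        /* run broken by a set bit */
                        continue;
                }
                if (++run > best_len) {
                        best_len = run;
                        best_off = i - run + 1;
                }
                if (run == wanted)
                        break;          /* got everything we needed */
        }
        if (!best_len)
                return -ENOSPC;
        *off = best_off;
        *len = best_len < wanted ? best_len : wanted;
        return 0;
}
#endif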
static inline int ocfs2_block_group_set_bits(struct ocfs2_journal_handle *handle,
                                             struct inode *alloc_inode,
                                             struct ocfs2_group_desc *bg,
                                             struct buffer_head *group_bh,
                                             unsigned int bit_off,
                                             unsigned int num_bits)

        void *bitmap = bg->bg_bitmap;
        int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;

        if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
                OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);

        BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);

        mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,

        if (ocfs2_is_cluster_bitmap(alloc_inode))
                journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

        status = ocfs2_journal_access(handle,

        le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
                ocfs2_set_bit(bit_off++, bitmap);

        status = ocfs2_journal_dirty(handle,

/* find the one with the most empty bits */
static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)

        BUG_ON(!cl->cl_next_free_rec);

        while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
                if (le32_to_cpu(cl->cl_recs[curr].c_free) >
                    le32_to_cpu(cl->cl_recs[best].c_free))

        BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
static int ocfs2_relink_block_group(struct ocfs2_journal_handle *handle,
                                    struct inode *alloc_inode,
                                    struct buffer_head *fe_bh,
                                    struct buffer_head *bg_bh,
                                    struct buffer_head *prev_bg_bh,

        /* there is a really tiny chance the journal calls could fail,
         * but we wouldn't want inconsistent blocks in *any* case. */
        u64 fe_ptr, bg_ptr, prev_bg_ptr;
        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
        struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
        struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;

        if (!OCFS2_IS_VALID_DINODE(fe)) {
                OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);

        if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
                OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);

        if (!OCFS2_IS_VALID_GROUP_DESC(prev_bg)) {
                OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, prev_bg);

        mlog(0, "In suballoc %"MLFu64", chain %u, move group %"MLFu64" to "
             "top, prev = %"MLFu64"\n",
             fe->i_blkno, chain, bg->bg_blkno, prev_bg->bg_blkno);

        fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
        bg_ptr = le64_to_cpu(bg->bg_next_group);
        prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);

        status = ocfs2_journal_access(handle, alloc_inode, prev_bg_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);

        prev_bg->bg_next_group = bg->bg_next_group;

        status = ocfs2_journal_dirty(handle, prev_bg_bh);

        status = ocfs2_journal_access(handle, alloc_inode, bg_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);

        bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;

        status = ocfs2_journal_dirty(handle, bg_bh);

        status = ocfs2_journal_access(handle, alloc_inode, fe_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);

        fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;

        status = ocfs2_journal_dirty(handle, fe_bh);

                fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
                bg->bg_next_group = cpu_to_le64(bg_ptr);
                prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
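/*
 * The error path above restores the three saved pointers so that a
 * failed journal call leaves the chain exactly as it was. A
 * stripped-down sketch of that save/modify/restore pattern follows
 * (illustrative only; generic types, not the OCFS2 structures):
 */
#if 0
static int example_splice_to_front(unsigned long long *head,
                                   unsigned long long *node_next,
                                   unsigned long long *prev_next,
                                   unsigned long long node,
                                   int (*commit)(void))
{
        unsigned long long saved_head = *head;
        unsigned long long saved_node_next = *node_next;
        unsigned long long saved_prev_next = *prev_next;

        *prev_next = *node_next;        /* unlink the node */
        *node_next = saved_head;        /* point it at the old head */
        *head = node;                   /* and make it the new head */

        if (commit())
                goto rollback;
        return 0;

rollback:
        *head = saved_head;
        *node_next = saved_node_next;
        *prev_next = saved_prev_next;
        return -1;
}
#endif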
static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,

        return le16_to_cpu(bg->bg_free_bits_count) > wanted;

/* return 0 on success, -ENOSPC to keep searching and any other < 0
static int ocfs2_cluster_group_search(struct inode *inode,
                                      struct buffer_head *group_bh,
                                      u32 bits_wanted, u32 min_bits,
                                      u16 *bit_off, u16 *bits_found)

        int search = -ENOSPC;
        struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;
        u16 tmp_off, tmp_found;

        BUG_ON(!ocfs2_is_cluster_bitmap(inode));

        if (bg->bg_free_bits_count) {
                ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
                                                        group_bh, bits_wanted,
                                                        &tmp_off, &tmp_found);

                /* ocfs2_block_group_find_clear_bits() might
                 * return success, but we still want to return
                 * -ENOSPC unless it found the minimum number
                if (min_bits <= tmp_found) {
                        *bits_found = tmp_found;
                        search = 0; /* success */

static int ocfs2_block_group_search(struct inode *inode,
                                    struct buffer_head *group_bh,
                                    u32 bits_wanted, u32 min_bits,
                                    u16 *bit_off, u16 *bits_found)

        struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;

        BUG_ON(min_bits != 1);
        BUG_ON(ocfs2_is_cluster_bitmap(inode));

        if (bg->bg_free_bits_count)
                ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
                                                        group_bh, bits_wanted,
                                                        bit_off, bits_found);
static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
                              unsigned int *num_bits,

        struct ocfs2_journal_handle *handle = ac->ac_handle;
        struct inode *alloc_inode = ac->ac_inode;
        struct buffer_head *group_bh = NULL;
        struct buffer_head *prev_group_bh = NULL;
        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
        struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
        struct ocfs2_group_desc *bg;

        chain = ac->ac_chain;
        mlog(0, "trying to alloc %u bits from chain %u, inode %"MLFu64"\n",
             bits_wanted, chain, OCFS2_I(alloc_inode)->ip_blkno);

        status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
                                  le64_to_cpu(cl->cl_recs[chain].c_blkno),
                                  &group_bh, OCFS2_BH_CACHED, alloc_inode);

        bg = (struct ocfs2_group_desc *) group_bh->b_data;
        if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
                OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);

        /* for now, the chain search is a bit simplistic. We just use
         * the 1st group with any empty bits. */
        while ((status = ac->ac_group_search(alloc_inode, group_bh,
                                             bits_wanted, min_bits, bit_off,
                                             &tmp_bits)) == -ENOSPC) {
                if (!bg->bg_next_group)

                if (prev_group_bh) {
                        brelse(prev_group_bh);
                        prev_group_bh = NULL;
                next_group = le64_to_cpu(bg->bg_next_group);
                prev_group_bh = group_bh;

                status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
                                          next_group, &group_bh,
                                          OCFS2_BH_CACHED, alloc_inode);

                bg = (struct ocfs2_group_desc *) group_bh->b_data;
                if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
                        OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);

        if (status != -ENOSPC)

        mlog(0, "alloc succeeds: we give %u bits from block group %"MLFu64"\n",
             tmp_bits, bg->bg_blkno);

        *num_bits = tmp_bits;

        BUG_ON(*num_bits == 0);

        /*
         * Keep track of previous block descriptor read. When
         * we find a target, if we have read more than X
         * number of descriptors, and the target is reasonably
         * empty, relink him to top of his chain.
         *
         * We've read 0 extra blocks and only send one more to
         * the transaction, yet the next guy to search has a
         *
         * Do this *after* figuring out how many bits we're taking out
         * of our target group.
         */
        if (ac->ac_allow_chain_relink &&
            (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
                status = ocfs2_relink_block_group(handle, alloc_inode,
                                                  ac->ac_bh, group_bh,
                                                  prev_group_bh, chain);

        /* Ok, claim our bits now: set the info on dinode, chainlist
         * and then the group */
        status = ocfs2_journal_access(handle,
                                      OCFS2_JOURNAL_ACCESS_WRITE);

        tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
        fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
        le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));

        status = ocfs2_journal_dirty(handle,

        status = ocfs2_block_group_set_bits(handle,

        mlog(0, "Allocated %u bits from suballocator %"MLFu64"\n",
             *num_bits, fe->i_blkno);

        *bg_blkno = le64_to_cpu(bg->bg_blkno);

                brelse(prev_group_bh);
/* will give out up to bits_wanted contiguous bits. */
static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
                                     struct ocfs2_alloc_context *ac,
                                     unsigned int *num_bits,

        struct ocfs2_chain_list *cl;
        struct ocfs2_dinode *fe;

        BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
        BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));

        fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
        if (!OCFS2_IS_VALID_DINODE(fe)) {
                OCFS2_RO_ON_INVALID_DINODE(osb->sb, fe);

        if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
            le32_to_cpu(fe->id1.bitmap1.i_total)) {
                ocfs2_error(osb->sb, "Chain allocator dinode %"MLFu64" has %u "
                            "used bits but only %u total.",
                            le64_to_cpu(fe->i_blkno),
                            le32_to_cpu(fe->id1.bitmap1.i_used),
                            le32_to_cpu(fe->id1.bitmap1.i_total));
        cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;

        victim = ocfs2_find_victim_chain(cl);
        ac->ac_chain = victim;
        ac->ac_allow_chain_relink = 1;

        status = ocfs2_search_chain(ac, bits_wanted, min_bits, bit_off,
                                    num_bits, bg_blkno);

        if (status < 0 && status != -ENOSPC) {

        mlog(0, "Search of victim chain %u came up with nothing, "
             "trying all chains now.\n", victim);

        /* If we didn't pick a good victim, then just default to
         * searching each chain in order. Don't allow chain relinking
         * because we only calculate enough journal credits for one
         * relink per alloc. */
        ac->ac_allow_chain_relink = 0;
        for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {

                if (!cl->cl_recs[i].c_free)

                status = ocfs2_search_chain(ac, bits_wanted, min_bits,

                if (status < 0 && status != -ENOSPC) {
int ocfs2_claim_metadata(struct ocfs2_super *osb,
                         struct ocfs2_journal_handle *handle,
                         struct ocfs2_alloc_context *ac,
                         u16 *suballoc_bit_start,
                         unsigned int *num_bits,

        BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
        BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
        BUG_ON(ac->ac_handle != handle);

        status = ocfs2_claim_suballoc_bits(osb,

        atomic_inc(&osb->alloc_stats.bg_allocs);

        *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
        ac->ac_bits_given += (*num_bits);

int ocfs2_claim_new_inode(struct ocfs2_super *osb,
                          struct ocfs2_journal_handle *handle,
                          struct ocfs2_alloc_context *ac,

        unsigned int num_bits;

        BUG_ON(ac->ac_bits_given != 0);
        BUG_ON(ac->ac_bits_wanted != 1);
        BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
        BUG_ON(ac->ac_handle != handle);

        status = ocfs2_claim_suballoc_bits(osb,

        atomic_inc(&osb->alloc_stats.bg_allocs);

        BUG_ON(num_bits != 1);

        *fe_blkno = bg_blkno + (u64) (*suballoc_bit);
        ac->ac_bits_given++;
/* translate a group desc. blkno and its bitmap offset into
 * disk cluster offset. */
static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,

        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        BUG_ON(!ocfs2_is_cluster_bitmap(inode));

        if (bg_blkno != osb->first_cluster_group_blkno)
                cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
        cluster += (u32) bg_bit_off;
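/*
 * Worked example (illustrative only; hypothetical geometry of 8 blocks
 * per cluster, and a group that is not the first cluster group): a
 * descriptor at block 16384 sits at cluster 16384 / 8 = 2048, so bit 5
 * of that group maps to disk cluster 2048 + 5 = 2053. For the first
 * cluster group the block offset is skipped and the bit offset alone
 * gives the cluster number.
 */
#if 0
static unsigned int example_desc_bit_to_cluster(unsigned long long bg_blkno,
                                                unsigned int bg_bit_off,
                                                unsigned int blocks_per_cluster,
                                                unsigned long long first_group_blkno)
{
        unsigned int cluster = 0;

        if (bg_blkno != first_group_blkno)
                cluster = (unsigned int)(bg_blkno / blocks_per_cluster);
        return cluster + bg_bit_off;    /* 2048 + 5 == 2053 in the example */
}
#endif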
/* given a cluster offset, calculate which block group it belongs to
 * and return that block offset. */
static inline u64 ocfs2_which_cluster_group(struct inode *inode,

        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        BUG_ON(!ocfs2_is_cluster_bitmap(inode));

        group_no = cluster / osb->bitmap_cpg;
                return osb->first_cluster_group_blkno;
        return ocfs2_clusters_to_blocks(inode->i_sb,
                                        group_no * osb->bitmap_cpg);
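/*
 * Worked example for the reverse mapping (illustrative only; same
 * hypothetical geometry, with bitmap_cpg = 2048 clusters tracked per
 * group): cluster 2053 lives in group 2053 / 2048 = 1, whose
 * descriptor sits at block 1 * 2048 * 8 = 16384; group 0 is
 * special-cased to first_cluster_group_blkno.
 */
#if 0
static unsigned long long example_cluster_to_group_blkno(unsigned int cluster,
                                                         unsigned int bitmap_cpg,
                                                         unsigned int blocks_per_cluster,
                                                         unsigned long long first_group_blkno)
{
        unsigned int group_no = cluster / bitmap_cpg;

        if (!group_no)
                return first_group_blkno;
        return (unsigned long long)group_no * bitmap_cpg * blocks_per_cluster;
}
#endif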
/* given the block number of a cluster start, calculate which cluster
 * group and descriptor bitmap offset that corresponds to. */
static inline void ocfs2_block_to_cluster_group(struct inode *inode,

        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno);

        BUG_ON(!ocfs2_is_cluster_bitmap(inode));

        *bg_blkno = ocfs2_which_cluster_group(inode,

        if (*bg_blkno == osb->first_cluster_group_blkno)
                *bg_bit_off = (u16) data_cluster;
                *bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb,
                                                             data_blkno - *bg_blkno);
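/*
 * Closing the loop on the two helpers above (illustrative only; same
 * hypothetical geometry): a data extent starting at block 16424 is
 * cluster 16424 / 8 = 2053, which belongs to the group at block 16384,
 * and within that group it is bit (16424 - 16384) / 8 = 5 -- exactly
 * the (group, bit) pair the free path needs.
 */
#if 0
static void example_block_to_group_and_bit(unsigned long long data_blkno,
                                           unsigned int bitmap_cpg,
                                           unsigned int blocks_per_cluster,
                                           unsigned long long first_group_blkno,
                                           unsigned long long *bg_blkno,
                                           unsigned int *bg_bit_off)
{
        unsigned int cluster = (unsigned int)(data_blkno / blocks_per_cluster);
        unsigned int group_no = cluster / bitmap_cpg;

        if (!group_no)
                *bg_blkno = first_group_blkno;
        else
                *bg_blkno = (unsigned long long)group_no * bitmap_cpg *
                            blocks_per_cluster;

        if (*bg_blkno == first_group_blkno)
                *bg_bit_off = cluster;
        else
                *bg_bit_off = (unsigned int)((data_blkno - *bg_blkno) /
                                             blocks_per_cluster);
}
#endif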
/*
 * min_bits - minimum contiguous chunk from this total allocation we
 * can handle. set to what we asked for originally for a full
 * contig. allocation, set to '1' to indicate we can deal with extents
int ocfs2_claim_clusters(struct ocfs2_super *osb,
                         struct ocfs2_journal_handle *handle,
                         struct ocfs2_alloc_context *ac,

        unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;

        BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);

        BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
               && ac->ac_which != OCFS2_AC_USE_MAIN);
        BUG_ON(ac->ac_handle != handle);

        if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
                status = ocfs2_claim_local_alloc_bits(osb,

                atomic_inc(&osb->alloc_stats.local_data);

                if (min_clusters > (osb->bitmap_cpg - 1)) {
                        /* The only paths asking for contiguousness
                         * should know about this already. */
                        mlog(ML_ERROR, "minimum allocation requested exceeds "
                             "group bitmap size!");

                /* clamp the current request down to a realistic size. */
                if (bits_wanted > (osb->bitmap_cpg - 1))
                        bits_wanted = osb->bitmap_cpg - 1;

                status = ocfs2_claim_suballoc_bits(osb,

                        ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,

                atomic_inc(&osb->alloc_stats.bitmap_data);

                if (status != -ENOSPC)

        ac->ac_bits_given += *num_clusters;
static inline int ocfs2_block_group_clear_bits(struct ocfs2_journal_handle *handle,
                                               struct inode *alloc_inode,
                                               struct ocfs2_group_desc *bg,
                                               struct buffer_head *group_bh,
                                               unsigned int bit_off,
                                               unsigned int num_bits)

        int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
        struct ocfs2_group_desc *undo_bg = NULL;

        if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
                OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);

        mlog(0, "off = %u, num = %u\n", bit_off, num_bits);

        if (ocfs2_is_cluster_bitmap(alloc_inode))
                journal_type = OCFS2_JOURNAL_ACCESS_UNDO;

        status = ocfs2_journal_access(handle, alloc_inode, group_bh,

        if (ocfs2_is_cluster_bitmap(alloc_inode))
                undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data;

                ocfs2_clear_bit((bit_off + tmp),
                                (unsigned long *) bg->bg_bitmap);
                if (ocfs2_is_cluster_bitmap(alloc_inode))
                        ocfs2_set_bit(bit_off + tmp,
                                      (unsigned long *) undo_bg->bg_bitmap);

        le16_add_cpu(&bg->bg_free_bits_count, num_bits);

        status = ocfs2_journal_dirty(handle, group_bh);
/*
 * expects the suballoc inode to already be locked.
 */
static int ocfs2_free_suballoc_bits(struct ocfs2_journal_handle *handle,
                                    struct inode *alloc_inode,
                                    struct buffer_head *alloc_bh,
                                    unsigned int start_bit,

        struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
        struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
        struct ocfs2_chain_list *cl = &fe->id2.i_chain;
        struct buffer_head *group_bh = NULL;
        struct ocfs2_group_desc *group;

        if (!OCFS2_IS_VALID_DINODE(fe)) {
                OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);

        BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));

        mlog(0, "suballocator %"MLFu64": freeing %u bits from group %"MLFu64
             ", starting at %u\n",
             OCFS2_I(alloc_inode)->ip_blkno, count, bg_blkno,

        status = ocfs2_read_block(osb, bg_blkno, &group_bh, OCFS2_BH_CACHED,

        group = (struct ocfs2_group_desc *) group_bh->b_data;
        if (!OCFS2_IS_VALID_GROUP_DESC(group)) {
                OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, group);

        BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));

        status = ocfs2_block_group_clear_bits(handle, alloc_inode,

        status = ocfs2_journal_access(handle, alloc_inode, alloc_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);

        le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
        tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
        fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);

        status = ocfs2_journal_dirty(handle, alloc_bh);
static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit)

        u64 group = block - (u64) bit;
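/*
 * Worked example (illustrative only; the numbers are hypothetical): an
 * inode allocated from bit 7 of a suballocator group whose descriptor
 * lives at block 5000 records i_blkno = 5007 and i_suballoc_bit = 7,
 * so the group is recovered as 5007 - 7 = 5000. This works because a
 * suballocated block sits exactly "bit" blocks behind its group
 * descriptor.
 */
#if 0
static unsigned long long example_suballoc_group_of(unsigned long long blkno,
                                                    unsigned int suballoc_bit)
{
        return blkno - suballoc_bit;    /* 5007 - 7 == 5000 in the example */
}
#endif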
int ocfs2_free_dinode(struct ocfs2_journal_handle *handle,
                      struct inode *inode_alloc_inode,
                      struct buffer_head *inode_alloc_bh,
                      struct ocfs2_dinode *di)

        u64 blk = le64_to_cpu(di->i_blkno);
        u16 bit = le16_to_cpu(di->i_suballoc_bit);
        u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);

        return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
                                        inode_alloc_bh, bit, bg_blkno, 1);

int ocfs2_free_extent_block(struct ocfs2_journal_handle *handle,
                            struct inode *eb_alloc_inode,
                            struct buffer_head *eb_alloc_bh,
                            struct ocfs2_extent_block *eb)

        u64 blk = le64_to_cpu(eb->h_blkno);
        u16 bit = le16_to_cpu(eb->h_suballoc_bit);
        u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);

        return ocfs2_free_suballoc_bits(handle, eb_alloc_inode, eb_alloc_bh,

int ocfs2_free_clusters(struct ocfs2_journal_handle *handle,
                        struct inode *bitmap_inode,
                        struct buffer_head *bitmap_bh,
                        unsigned int num_clusters)

        struct ocfs2_dinode *fe;

        /* You can't ever have a contiguous set of clusters
         * bigger than a block group bitmap so we never have to worry
         * about looping on them. */

        /* This is expensive. We can safely remove once this stuff has
         * gotten tested really well. */
        BUG_ON(start_blk !=
               ocfs2_clusters_to_blocks(bitmap_inode->i_sb,
                                        ocfs2_blocks_to_clusters(bitmap_inode->i_sb,
                                                                 start_blk)));
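        /*
         * The BUG_ON above is a cluster-alignment check: converting the
         * block number to clusters and back drops any sub-cluster
         * remainder, so the round trip only matches when start_blk sits
         * on a cluster boundary. Illustrative sketch (not built) with a
         * hypothetical 8-blocks-per-cluster layout: 16384 -> 2048 ->
         * 16384 passes, while 16387 -> 2048 -> 16384 would trip it.
         */
#if 0
static int example_cluster_aligned(unsigned long long blk,
                                   unsigned int blocks_per_cluster)
{
        return blk == (blk / blocks_per_cluster) * blocks_per_cluster;
}
#endif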
        fe = (struct ocfs2_dinode *) bitmap_bh->b_data;

        ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,

        mlog(0, "want to free %u clusters starting at block %"MLFu64"\n",
             num_clusters, start_blk);
        mlog(0, "bg_blkno = %"MLFu64", bg_start_bit = %u\n",
             bg_blkno, bg_start_bit);

        status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
                                          bg_start_bit, bg_blkno,

static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)

        printk("Block Group:\n");
        printk("bg_signature: %s\n", bg->bg_signature);
        printk("bg_size: %u\n", bg->bg_size);
        printk("bg_bits: %u\n", bg->bg_bits);
        printk("bg_free_bits_count: %u\n", bg->bg_free_bits_count);
        printk("bg_chain: %u\n", bg->bg_chain);
        printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation));
        printk("bg_next_group: %"MLFu64"\n", bg->bg_next_group);
        printk("bg_parent_dinode: %"MLFu64"\n", bg->bg_parent_dinode);
        printk("bg_blkno: %"MLFu64"\n", bg->bg_blkno);
static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe)

        printk("Suballoc Inode %"MLFu64":\n", fe->i_blkno);
        printk("i_signature: %s\n", fe->i_signature);
        printk("i_size: %"MLFu64"\n", fe->i_size);
        printk("i_clusters: %u\n", fe->i_clusters);
        printk("i_generation: %u\n",
               le32_to_cpu(fe->i_generation));
        printk("id1.bitmap1.i_used: %u\n",
               le32_to_cpu(fe->id1.bitmap1.i_used));
        printk("id1.bitmap1.i_total: %u\n",
               le32_to_cpu(fe->id1.bitmap1.i_total));
        printk("id2.i_chain.cl_cpg: %u\n", fe->id2.i_chain.cl_cpg);
        printk("id2.i_chain.cl_bpc: %u\n", fe->id2.i_chain.cl_bpc);
        printk("id2.i_chain.cl_count: %u\n", fe->id2.i_chain.cl_count);
        printk("id2.i_chain.cl_next_free_rec: %u\n",
               fe->id2.i_chain.cl_next_free_rec);
        for (i = 0; i < fe->id2.i_chain.cl_next_free_rec; i++) {
                printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i,
                       fe->id2.i_chain.cl_recs[i].c_free);
                printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i,
                       fe->id2.i_chain.cl_recs[i].c_total);
                printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %"MLFu64"\n", i,
                       fe->id2.i_chain.cl_recs[i].c_blkno);