/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "bmap.h"
#include "inode.h"
#include "meta_io.h"
#include "trans.h"
#include "unlinked.h"
#include "util.h"

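/**
 * munge_ondisk - write a tag into its slot in the unlinked-tag file
 * @sdp: the filesystem
 * @slot: the slot that holds the tag
 * @ut: the tag to write out
 *
 * Returns: errno
 */
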
static int munge_ondisk(struct gfs2_sbd *sdp, unsigned int slot,
			struct gfs2_unlinked_tag *ut)
{
	struct gfs2_inode *ip = sdp->sd_ut_inode->u.generic_ip;
	unsigned int block, offset;
	uint64_t dblock;
	int new = 0;
	struct buffer_head *bh;
	int error;

	block = slot / sdp->sd_ut_per_block;
	offset = slot % sdp->sd_ut_per_block;

	error = gfs2_block_map(ip, block, &new, &dblock, NULL);
	if (error)
		return error;
	error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);
	if (error)
		return error;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
		error = -EIO;
		goto out;
	}

	mutex_lock(&sdp->sd_unlinked_mutex);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_unlinked_tag_out(ut, bh->b_data +
			      sizeof(struct gfs2_meta_header) +
			      offset * sizeof(struct gfs2_unlinked_tag));
	mutex_unlock(&sdp->sd_unlinked_mutex);

out:
	brelse(bh);
	return error;
}

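/**
 * ul_hash - add an unlinked structure to the superblock's list
 * @sdp: the filesystem
 * @ul: the unlinked structure
 */
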
static void ul_hash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	spin_lock(&sdp->sd_unlinked_spin);
	list_add(&ul->ul_list, &sdp->sd_unlinked_list);
	gfs2_assert(sdp, ul->ul_count);
	ul->ul_count++;
	atomic_inc(&sdp->sd_unlinked_count);
	spin_unlock(&sdp->sd_unlinked_spin);
}

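/**
 * ul_unhash - remove an unlinked structure from the superblock's list
 * @sdp: the filesystem
 * @ul: the unlinked structure
 */
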
static void ul_unhash(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	spin_lock(&sdp->sd_unlinked_spin);
	list_del_init(&ul->ul_list);
	gfs2_assert(sdp, ul->ul_count > 1);
	ul->ul_count--;
	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_unlinked_count) > 0);
	atomic_dec(&sdp->sd_unlinked_count);
	spin_unlock(&sdp->sd_unlinked_spin);
}

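/**
 * ul_fish - find an unlocked unlinked structure on the list and lock it
 * @sdp: the filesystem
 *
 * Returns: a locked struct gfs2_unlinked, or NULL if none is available
 */
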
static struct gfs2_unlinked *ul_fish(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_unlinked *ul;
	int found = 0;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return NULL;

	spin_lock(&sdp->sd_unlinked_spin);

	head = &sdp->sd_unlinked_list;

	list_for_each_entry(ul, head, ul_list) {
		if (test_bit(ULF_LOCKED, &ul->ul_flags))
			continue;

		list_move_tail(&ul->ul_list, head);
		ul->ul_count++;
		set_bit(ULF_LOCKED, &ul->ul_flags);
		found = 1;
		break;
	}

	if (!found)
		ul = NULL;

	spin_unlock(&sdp->sd_unlinked_spin);

	return ul;
}

/**
 * enforce_limit - limit the number of inodes waiting to be deallocated
 * @sdp: the filesystem
 */

static void enforce_limit(struct gfs2_sbd *sdp)
{
	unsigned int tries = 0, min = 0;
	int error;

	if (atomic_read(&sdp->sd_unlinked_count) >=
	    gfs2_tune_get(sdp, gt_ilimit)) {
		tries = gfs2_tune_get(sdp, gt_ilimit_tries);
		min = gfs2_tune_get(sdp, gt_ilimit_min);
	}

	while (tries--) {
		struct gfs2_unlinked *ul = ul_fish(sdp);
		if (!ul)
			break;
		error = gfs2_inode_dealloc(sdp, ul);
		gfs2_unlinked_put(sdp, ul);

		if (!error) {
			if (!--min)
				break;
		} else if (error != 1)
			break;
	}
}

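/**
 * ul_alloc - allocate and initialize a new unlinked structure
 * @sdp: the filesystem
 *
 * Returns: the structure (locked, with a use count of 1), or NULL
 */
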
static struct gfs2_unlinked *ul_alloc(struct gfs2_sbd *sdp)
{
	struct gfs2_unlinked *ul;

	ul = kzalloc(sizeof(struct gfs2_unlinked), GFP_KERNEL);
	if (ul) {
		INIT_LIST_HEAD(&ul->ul_list);
		ul->ul_count = 1;
		set_bit(ULF_LOCKED, &ul->ul_flags);
	}

	return ul;
}

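/**
 * gfs2_unlinked_get - get a locked unlinked structure with a free tag slot
 * @sdp: the filesystem
 * @ul: used to return the new structure
 *
 * Returns: errno
 */
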
int gfs2_unlinked_get(struct gfs2_sbd *sdp, struct gfs2_unlinked **ul)
{
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	enforce_limit(sdp);

	*ul = ul_alloc(sdp);
	if (!*ul)
		return -ENOMEM;

	spin_lock(&sdp->sd_unlinked_spin);

	for (c = 0; c < sdp->sd_unlinked_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_unlinked_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	(*ul)->ul_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if ((*ul)->ul_slot >= sdp->sd_unlinked_slots)
		goto fail;

	sdp->sd_unlinked_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_unlinked_spin);

	return 0;

fail:
	spin_unlock(&sdp->sd_unlinked_spin);
	kfree(*ul);
	return -ENOSPC;
}

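/**
 * gfs2_unlinked_put - unlock an unlinked structure and drop a reference
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Frees the structure and releases its bitmap slot when the last
 * reference is dropped.
 */
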
void gfs2_unlinked_put(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	gfs2_assert_warn(sdp, test_and_clear_bit(ULF_LOCKED, &ul->ul_flags));

	spin_lock(&sdp->sd_unlinked_spin);
	gfs2_assert(sdp, ul->ul_count);
	ul->ul_count--;
	if (!ul->ul_count) {
		gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, ul->ul_slot, 0);
		spin_unlock(&sdp->sd_unlinked_spin);
		kfree(ul);
	} else
		spin_unlock(&sdp->sd_unlinked_spin);
}

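/**
 * gfs2_unlinked_ondisk_add - write a new tag to disk and hash the structure
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Returns: errno
 */
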
int gfs2_unlinked_ondisk_add(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	int error;

	gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
	gfs2_assert_warn(sdp, list_empty(&ul->ul_list));

	error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);
	if (!error)
		ul_hash(sdp, ul);

	return error;
}

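/**
 * gfs2_unlinked_ondisk_munge - rewrite an existing tag on disk
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Returns: errno
 */
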
int gfs2_unlinked_ondisk_munge(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	int error;

	gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
	gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));

	error = munge_ondisk(sdp, ul->ul_slot, &ul->ul_ut);

	return error;
}

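/**
 * gfs2_unlinked_ondisk_rm - clear a tag on disk and unhash the structure
 * @sdp: the filesystem
 * @ul: the unlinked structure
 *
 * Returns: errno
 */
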
int gfs2_unlinked_ondisk_rm(struct gfs2_sbd *sdp, struct gfs2_unlinked *ul)
{
	struct gfs2_unlinked_tag ut;
	int error;

	gfs2_assert_warn(sdp, test_bit(ULF_LOCKED, &ul->ul_flags));
	gfs2_assert_warn(sdp, !list_empty(&ul->ul_list));

	memset(&ut, 0, sizeof(struct gfs2_unlinked_tag));

	error = munge_ondisk(sdp, ul->ul_slot, &ut);
	if (error)
		return error;

	ul_unhash(sdp, ul);

	return 0;
}

/**
 * gfs2_unlinked_dealloc - Go through the list of inodes to be deallocated
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_unlinked_dealloc(struct gfs2_sbd *sdp)
{
	unsigned int hits, strikes;
	int error;

	for (;;) {
		hits = 0;
		strikes = 0;

		for (;;) {
			struct gfs2_unlinked *ul = ul_fish(sdp);
			if (!ul)
				return 0;
			error = gfs2_inode_dealloc(sdp, ul);
			gfs2_unlinked_put(sdp, ul);

			if (!error) {
				hits++;
				if (strikes)
					strikes--;
			} else if (error == 1) {
				strikes++;
				if (strikes >=
				    atomic_read(&sdp->sd_unlinked_count)) {
					error = 0;
					break;
				}
			} else
				return error;
		}

		if (!hits || kthread_should_stop())
			break;

		cond_resched();
	}

	return error;
}

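/**
 * gfs2_unlinked_init - read in the unlinked-tag file and build the list
 * @sdp: the filesystem
 *
 * Returns: errno
 */
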
int gfs2_unlinked_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = sdp->sd_ut_inode->u.generic_ip;
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	uint64_t dblock;
	uint32_t extlen = 0;
	int error;

	if (!ip->i_di.di_size ||
	    ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_unlinked_slots = blocks * sdp->sd_ut_per_block;
	sdp->sd_unlinked_chunks = DIV_ROUND_UP(sdp->sd_unlinked_slots,
					       8 * PAGE_SIZE);

	sdp->sd_unlinked_bitmap = kcalloc(sdp->sd_unlinked_chunks,
					  sizeof(unsigned char *),
					  GFP_KERNEL);
	if (!sdp->sd_unlinked_bitmap)
		return -ENOMEM;

	for (x = 0; x < sdp->sd_unlinked_chunks; x++) {
		sdp->sd_unlinked_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_unlinked_bitmap[x]) {
			error = -ENOMEM;
			goto fail;
		}
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_block_map(ip, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		gfs2_meta_ra(ip->i_gl, dblock, extlen);
		error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
				       &bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_UT)) {
			error = -EIO;
			brelse(bh);
			goto fail;
		}

		for (y = 0;
		     y < sdp->sd_ut_per_block && slot < sdp->sd_unlinked_slots;
		     y++, slot++) {
			struct gfs2_unlinked_tag ut;
			struct gfs2_unlinked *ul;

			gfs2_unlinked_tag_in(&ut, bh->b_data +
					sizeof(struct gfs2_meta_header) +
					y * sizeof(struct gfs2_unlinked_tag));
			if (!ut.ut_inum.no_addr)
				continue;

			ul = ul_alloc(sdp);
			if (!ul) {
				error = -ENOMEM;
				brelse(bh);
				goto fail;
			}
			ul->ul_slot = slot;
			ul->ul_ut = ut;

			spin_lock(&sdp->sd_unlinked_spin);
			gfs2_icbit_munge(sdp, sdp->sd_unlinked_bitmap, slot, 1);
			spin_unlock(&sdp->sd_unlinked_spin);
			ul_hash(sdp, ul);

			gfs2_unlinked_put(sdp, ul);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u unlinked inodes\n", found);

	return 0;

fail:
	gfs2_unlinked_cleanup(sdp);
	return error;
}

/**
 * gfs2_unlinked_cleanup - get rid of any extra struct gfs2_unlinked structures
 * @sdp: the filesystem
 */

void gfs2_unlinked_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_unlinked_list;
	struct gfs2_unlinked *ul;
	unsigned int x;

	spin_lock(&sdp->sd_unlinked_spin);
	while (!list_empty(head)) {
		ul = list_entry(head->next, struct gfs2_unlinked, ul_list);

		if (ul->ul_count > 1) {
			list_move_tail(&ul->ul_list, head);
			spin_unlock(&sdp->sd_unlinked_spin);
			schedule();
			spin_lock(&sdp->sd_unlinked_spin);
			continue;
		}

		list_del_init(&ul->ul_list);
		atomic_dec(&sdp->sd_unlinked_count);

		gfs2_assert_warn(sdp, ul->ul_count == 1);
		gfs2_assert_warn(sdp, !test_bit(ULF_LOCKED, &ul->ul_flags));
		kfree(ul);
	}
	spin_unlock(&sdp->sd_unlinked_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_unlinked_count));

	if (sdp->sd_unlinked_bitmap) {
		for (x = 0; x < sdp->sd_unlinked_chunks; x++)
			kfree(sdp->sd_unlinked_bitmap[x]);
		kfree(sdp->sd_unlinked_bitmap);
	}
}