/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

/* GFS2-internal headers (fs/gfs2) providing the types and helpers used below */
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"

#define PULL 1
static void do_lock_wait(struct gfs2_sbd *sdp, wait_queue_head_t *wq,
			 atomic_t *a)
{
	wait_event(*wq, !atomic_read(a));
}

static void lock_for_trans(struct gfs2_sbd *sdp)
{
	do_lock_wait(sdp, &sdp->sd_log_trans_wq, &sdp->sd_log_flush_count);
	atomic_inc(&sdp->sd_log_trans_count);
}

static void unlock_from_trans(struct gfs2_sbd *sdp)
{
	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_trans_count));
	if (atomic_dec_and_test(&sdp->sd_log_trans_count))
		wake_up(&sdp->sd_log_flush_wq);
}

static void gfs2_lock_for_flush(struct gfs2_sbd *sdp)
{
	atomic_inc(&sdp->sd_log_flush_count);
	do_lock_wait(sdp, &sdp->sd_log_flush_wq, &sdp->sd_log_trans_count);
}

static void gfs2_unlock_from_flush(struct gfs2_sbd *sdp)
{
	gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_flush_count));
	if (atomic_dec_and_test(&sdp->sd_log_flush_count))
		wake_up(&sdp->sd_log_trans_wq);
}
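/*
 * How the helpers above fit together (illustrative summary):
 *
 *	transaction path			flush path
 *	----------------			----------
 *	lock_for_trans()			gfs2_lock_for_flush()
 *	  wait for sd_log_flush_count == 0	  bump sd_log_flush_count
 *	  bump sd_log_trans_count		  wait for sd_log_trans_count == 0
 *	... build the transaction ...		... flush the log ...
 *	unlock_from_trans()			gfs2_unlock_from_flush()
 *	  drop count, wake flushers		  drop count, wake transactions
 *
 * In effect this is a hand-rolled reader/writer style exclusion between
 * in-progress transactions and log flushes, built from two atomic counters
 * and two wait queues.
 */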
/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */
unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
			     unsigned int ssize)
{
	unsigned int blks = 1;
	unsigned int first, second;

	/* Entries that fit in the first (descriptor) block... */
	first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) /
		ssize;

	/* ...and in each continuation block after it. */
	if (nstruct > first) {
		second = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / ssize;
		blks += DIV_RU(nstruct - first, second);
	}

	return blks;
}
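/*
 * Worked example for gfs2_struct2blk() (illustrative numbers, not tied to a
 * particular on-disk layout): suppose a descriptor block has room for
 * first = 100 entries and each continuation block for second = 120.  Then
 * nstruct = 400 needs 1 + DIV_RU(400 - 100, 120) = 1 + 3 = 4 blocks, while
 * anything up to nstruct = 100 needs just the single descriptor block.
 */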
/**
 * gfs2_ail1_start - start writeback of buffers on the AIL1 list
 * @sdp: the filesystem
 * @flags: DIO_ALL to push every AIL1 entry, 0 to push only what is needed
 */

void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
	struct list_head *head = &sdp->sd_ail1_list;
	uint64_t sync_gen;
	struct list_head *first, *tmp;
	struct gfs2_ail *first_ai, *ai;

	gfs2_log_lock(sdp);
	if (list_empty(head)) {
		gfs2_log_unlock(sdp);
		return;
	}
	sync_gen = sdp->sd_ail_sync_gen++;

	first = head->prev;
	first_ai = list_entry(first, struct gfs2_ail, ai_list);
	first_ai->ai_sync_gen = sync_gen;
	gfs2_ail1_start_one(sdp, first_ai);

	if (flags & DIO_ALL)
		first = NULL;

	for (;;) {
		/* Stop once the oldest entry has been cleaned, unless
		   DIO_ALL asked us to push everything. */
		if (first &&
		    (head->prev != first ||
		     gfs2_ail1_empty_one(sdp, first_ai, 0)))
			break;

		/* Start the next entry not yet pushed in this pass. */
		for (tmp = head->prev; tmp != head; tmp = tmp->prev) {
			ai = list_entry(tmp, struct gfs2_ail, ai_list);
			if (ai->ai_sync_gen >= sync_gen)
				continue;
			ai->ai_sync_gen = sync_gen;
			gfs2_ail1_start_one(sdp, ai);
			break;
		}

		if (tmp == head)
			break;
	}

	gfs2_log_unlock(sdp);
}
/**
 * gfs2_ail1_empty - move finished AIL1 entries to the AIL2 list
 * @sdp: the filesystem
 * @flags: DIO_ALL to scan the whole list even after finding a busy entry
 *
 * Returns: true if the AIL1 list is now empty
 */

int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
	struct gfs2_ail *ai, *s;
	int ret;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		if (gfs2_ail1_empty_one(sdp, ai, flags))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
		else if (!(flags & DIO_ALL))
			break;
	}

	ret = list_empty(&sdp->sd_ail1_list);

	gfs2_log_unlock(sdp);

	return ret;
}
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
		kfree(ai);
	}

	gfs2_log_unlock(sdp);
}
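/*
 * The removal test in ail2_empty() above, with illustrative numbers: an AIL2
 * entry is freed when its first block (ai_first) lies in the region the tail
 * is moving across, i.e. in [old_tail, new_tail).  Without wraparound
 * (old_tail = 10, new_tail = 50) that is simply 10 <= ai_first < 50.  With
 * wraparound (old_tail = 90, new_tail = 20 in a 100-block journal) the region
 * is split, so an entry is freed when ai_first >= 90 OR ai_first < 20, which
 * is exactly the (wrap) ? (a || b) : (a && b) expression.
 */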
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	LIST_HEAD(list);
	unsigned int try = 0;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;

	for (;;) {
		gfs2_log_lock(sdp);

		/* Queue up behind any other waiters, FIFO order. */
		if (list_empty(&list)) {
			list_add_tail(&list, &sdp->sd_log_blks_list);
			while (sdp->sd_log_blks_list.next != &list) {
				DECLARE_WAITQUEUE(__wait_chan, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&sdp->sd_log_blks_wait,
					       &__wait_chan);
				gfs2_log_unlock(sdp);
				schedule();
				gfs2_log_lock(sdp);
				remove_wait_queue(&sdp->sd_log_blks_wait,
						  &__wait_chan);
				set_current_state(TASK_RUNNING);
			}
		}

		/* Never give away the last block so we can
		   always pull the tail if we need to. */
		if (sdp->sd_log_blks_free > blks) {
			sdp->sd_log_blks_free -= blks;
			list_del(&list);
			gfs2_log_unlock(sdp);
			wake_up(&sdp->sd_log_blks_wait);
			break;
		}

		/* Not enough room: push the AIL and flush the log to move
		   the tail forward, then try again. */
		gfs2_log_unlock(sdp);

		gfs2_ail1_empty(sdp, 0);
		gfs2_log_flush_i(sdp, NULL);

		if (try++)
			gfs2_ail1_start(sdp, 0);
	}

	lock_for_trans(sdp);

	return 0;
}
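/*
 * Reservation lifecycle, as implemented by the functions in this file
 * (editorial summary): gfs2_log_reserve() takes @blks out of
 * sd_log_blks_free and enters the transaction side of the trans/flush
 * exclusion via lock_for_trans().  The matching exit is either
 * gfs2_log_commit(), which lets log_refund() hand back whatever part of the
 * reservation the commit did not need, or gfs2_log_release(), which simply
 * returns all @blks when the transaction is abandoned.
 */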
/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
	unlock_from_trans(sdp);

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += blks;
	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);
}
static uint64_t log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
	int new = 0;
	uint64_t dbn;
	int error;

	/* Map a logical journal block to its on-disk block number. */
	error = gfs2_block_map(get_v2ip(sdp->sd_jdesc->jd_inode),
			       lbn, &new, &dbn, NULL);
	gfs2_assert_withdraw(sdp, !error && dbn);

	return dbn;
}
/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */
static inline unsigned int log_distance(struct gfs2_sbd *sdp,
					unsigned int newer,
					unsigned int older)
{
	int dist;

	dist = newer - older;
	if (dist < 0)
		dist += sdp->sd_jdesc->jd_blocks;

	return dist;
}
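/*
 * Example for log_distance() (illustrative numbers): in a 1000-block journal,
 * the distance from older = 200 to newer = 450 is 250.  If the log has
 * wrapped, say older = 900 and newer = 50, the raw difference is negative,
 * so jd_blocks is added back: 50 - 900 + 1000 = 150 blocks.
 */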
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	unsigned int tail;

	gfs2_log_lock(sdp);

	/* The tail is held back by the oldest AIL1 entry still in flight;
	   with no AIL1 entries the log is clean all the way to the head. */
	if (list_empty(&sdp->sd_ail1_list))
		tail = sdp->sd_log_head;
	else {
		ai = list_entry(sdp->sd_ail1_list.prev,
				struct gfs2_ail, ai_list);
		tail = ai->ai_first;
	}

	gfs2_log_unlock(sdp);

	return tail;
}
static inline void log_incr_head(struct gfs2_sbd *sdp)
{
	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
		gfs2_assert_withdraw(sdp,
				sdp->sd_log_flush_head == sdp->sd_log_head);

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}
/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
	uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);

	bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	log_incr_head(sdp);

	return bh;
}
/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer_head whose data should be written to the log
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
				      struct buffer_head *real)
{
	uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);
	lb->lb_real = real;

	/* The fake buffer head shares the real buffer's page but is mapped
	   to the journal block we are about to write. */
	bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;

	log_incr_head(sdp);

	return bh;
}
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail, int pull)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += dist - ((pull) ? 1 : 0);
	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);

	sdp->sd_log_tail = new_tail;
}
/**
 * log_write_header - Write a log header into the journal
 * @sdp: The GFS2 superblock
 * @flags: log header flags, e.g. GFS2_LOG_HEAD_UNMOUNT
 * @pull: non-zero if the header itself is being used to pull the tail
 *
 */

static void log_write_header(struct gfs2_sbd *sdp, uint32_t flags, int pull)
{
	uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;
	struct gfs2_log_header *lh;
	unsigned int tail;
	uint32_t hash;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	gfs2_ail1_empty(sdp, 0);
	tail = current_tail(sdp);

	lh = (struct gfs2_log_header *)bh->b_data;
	memset(lh, 0, sizeof(struct gfs2_log_header));
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be16(GFS2_METATYPE_LH);
	lh->lh_header.mh_format = cpu_to_be16(GFS2_FORMAT_LH);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	set_buffer_dirty(bh);
	if (sync_dirty_buffer(bh))
		gfs2_io_error_bh(sdp, bh);
	brelse(bh);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail, pull);
	else
		gfs2_assert_withdraw(sdp, !pull);

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	log_incr_head(sdp);
}
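/*
 * Note on the tail handling in log_write_header() (editorial): the header
 * records the current tail, and if that tail has moved past sd_log_tail the
 * distance is handed back to sd_log_blks_free by log_pull_tail().  For
 * example, if the tail advances from block 10 to block 25, those 15 blocks
 * become reusable, minus the one block consumed by the header itself when it
 * was written purely to pull the tail (the "pull" argument).
 */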
static void log_flush_commit(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_log_flush_list;
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;
	unsigned int d;

	d = log_distance(sdp, sdp->sd_log_flush_head, sdp->sd_log_head);

	gfs2_assert_withdraw(sdp, d + 1 == sdp->sd_log_blks_reserved);

	while (!list_empty(head)) {
		lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
		list_del(&lb->lb_list);
		bh = lb->lb_bh;

		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		if (lb->lb_real) {
			while (atomic_read(&bh->b_count) != 1)	/* Grrrr... */
				schedule();
			free_buffer_head(bh);
		} else
			brelse(bh);
		kfree(lb);
	}

	log_write_header(sdp, 0, 0);
}
/**
 * gfs2_log_flush_i - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush_i(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	struct gfs2_ail *ai;

	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ai->ai_ail1_list);
	INIT_LIST_HEAD(&ai->ai_ail2_list);
	gfs2_lock_for_flush(sdp);

	if (gl) {
		gfs2_log_lock(sdp);
		if (list_empty(&gl->gl_le.le_list)) {
			/* Nothing of this glock's in the log; nothing to do. */
			gfs2_log_unlock(sdp);
			gfs2_unlock_from_flush(sdp);
			kfree(ai);
			return;
		}
		gfs2_log_unlock(sdp);
	}

	mutex_lock(&sdp->sd_log_flush_lock);

	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_buf == sdp->sd_log_commited_buf);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	ai->ai_first = sdp->sd_log_flush_head;

	lops_before_commit(sdp);
	if (!list_empty(&sdp->sd_log_flush_list))
		log_flush_commit(sdp);
	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
		log_write_header(sdp, 0, PULL);
	lops_after_commit(sdp, ai);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	if (sdp->sd_log_flush_wrapped)
		sdp->sd_log_wraps++;

	sdp->sd_log_blks_reserved =
		sdp->sd_log_commited_buf =
		sdp->sd_log_commited_revoke = 0;

	gfs2_log_lock(sdp);
	if (!list_empty(&ai->ai_ail1_list)) {
		list_add(&ai->ai_list, &sdp->sd_ail1_list);
		ai = NULL;
	}
	gfs2_log_unlock(sdp);

	mutex_unlock(&sdp->sd_log_flush_lock);
	sdp->sd_vfs->s_dirt = 0;
	gfs2_unlock_from_flush(sdp);

	kfree(ai);
}
/**
 * log_refund - work out how many blocks a commit really needs and
 *              return the excess reservation to the free pool
 * @sdp: the filesystem
 * @tr: the transaction being committed
 */

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved = 1;
	unsigned int old;

	gfs2_log_lock(sdp);

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_buf) >= 0);
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);

	if (sdp->sd_log_commited_buf)
		reserved += 1 + sdp->sd_log_commited_buf +
			    sdp->sd_log_commited_buf/503;
	if (sdp->sd_log_commited_revoke)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(uint64_t));

	old = sdp->sd_log_blks_free;
	sdp->sd_log_blks_free += tr->tr_reserved -
				 (reserved - sdp->sd_log_blks_reserved);

	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free >= old);
	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}
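/*
 * Worked example of the log_refund() arithmetic (illustrative numbers): a
 * transaction that reserved tr_reserved = 20 blocks but ends up committing
 * 6 metadata buffers and no revokes needs
 *
 *	reserved = 1 + (1 + 6 + 6/503) = 8
 *
 * blocks: the log header, one buffer descriptor, and the 6 buffers.  The
 * /503 term adds roughly one extra descriptor block per 503 buffers and only
 * matters for very large commits.  With no blocks carried over from an
 * earlier commit, 20 - 8 = 12 blocks go back to sd_log_blks_free.
 */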
/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	log_refund(sdp, tr);
	lops_incore_commit(sdp, tr);

	sdp->sd_vfs->s_dirt = 1;
	unlock_from_trans(sdp);

	kfree(tr);

	/* If too much metadata has piled up in core, flush the log now. */
	gfs2_log_lock(sdp);
	if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) {
		gfs2_log_unlock(sdp);
		gfs2_log_flush_i(sdp, NULL);
	} else
		gfs2_log_unlock(sdp);
}
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_log_flush_lock);

	/* By now the log must be quiescent and fully checkpointed. */
	gfs2_assert_withdraw(sdp, !atomic_read(&sdp->sd_log_trans_count));
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT, 0);

	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free ==
			     sdp->sd_jdesc->jd_blocks);
	gfs2_assert_withdraw(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail2_list));

	sdp->sd_log_head = sdp->sd_log_flush_head;
	if (sdp->sd_log_flush_wrapped)
		sdp->sd_log_wraps++;
	sdp->sd_log_tail = sdp->sd_log_head;

	mutex_unlock(&sdp->sd_log_flush_lock);
}