/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
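/*
 * Each type of log element (glock, metadata buffer, revoke, resource
 * group, data buffer) supplies its own set of lo_* operations. The
 * generic log and recovery code invokes them through the
 * gfs2_log_operations tables defined at the bottom of this file.
 */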
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}
static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	if (!list_empty(&bd->bd_list_tr))
		return;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);

	tr->tr_num_buf_new++;
}
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num;
	unsigned int n;
	__be64 *ptr;

	offset += (sizeof(__be64) - 1);
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */
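	/*
	 * Worked example, assuming the usual on-disk sizes: the log
	 * descriptor occupies 72 bytes, which is already 8-byte
	 * aligned, so a 4096 byte block has room for
	 * (4096 - 72) / 8 = 503 __be64 block numbers.
	 */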
	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while (total) {
		num = total;
		if (num > limit)
			num = limit;
		bh = gfs2_log_get_buf(sdp);
		sdp->sd_log_num_hdrs++;
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}
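/*
 * Note on recovery: journal replay makes two passes over the log.
 * Pass 0 collects revoke tags (see revoke_lo_scan_elements below) and
 * pass 1 replays metadata and journaled data blocks, skipping any
 * block covered by a matching revoke.
 */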
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl, DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;

	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}
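/*
 * The revoke descriptor written below is a gfs2_log_descriptor header
 * followed by tightly packed __be64 block numbers. When one log block
 * fills up, the list continues in further blocks carrying a plain
 * gfs2_meta_header tagged GFS2_METATYPE_LB.
 */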
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(u64)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del_init(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}
static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);
	gfs2_rgrp_bh_hold(rgd);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}
static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}
/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time.
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
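/*
 * For illustration only (this struct does not exist in the code, which
 * writes the two values directly), each jdata tag has the shape:
 *
 *	__be64 blkno;	block number, as for metadata
 *	__be64 esc;	non-zero if the block needed escaping
 */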
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	tr->tr_touched = 1;
	if (list_empty(&bd->bd_list_tr) &&
	    (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
		tr->tr_num_buf++;
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_buf_new++;
	}
	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_log_lock(sdp);
	if (list_empty(&le->le_list)) {
		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
			sdp->sd_log_num_jdata++;
		sdp->sd_log_num_databuf++;
		list_add(&le->le_list, &sdp->sd_log_le_databuf);
	}
	gfs2_log_unlock(sdp);
}
static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	kunmap_atomic(kaddr, KM_USER0);

	return rv;
}
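/*
 * Escaping: replay identifies metadata by the GFS2_MAGIC value at the
 * start of a block, so a data block that happens to begin with that
 * value has the word zeroed in its log copy and the fact recorded in
 * its tag; databuf_lo_scan_elements() puts the magic back on replay.
 */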
/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	offset += (2*sizeof(__be64) - 1);
	offset &= ~(2*sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
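	/*
	 * Each tag here is two __be64s, and assuming the usual sizes
	 * the 72 byte descriptor rounds up to 80 (a 16 byte boundary),
	 * so a 4096 byte block holds (4096 - 80) / 8 = 502 slots,
	 * i.e. the "251 or so" data blocks mentioned above.
	 */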
	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while (total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;
		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* An ordered write buffer */
			if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				total_dbuf--;
				if (bd1->bd_bh) {
					get_bh(bd1->bd_bh);
					if (buffer_dirty(bd1->bd_bh)) {
						gfs2_log_unlock(sdp);
						wait_on_buffer(bd1->bd_bh);
						ll_rw_block(WRITE, 1,
							    &bd1->bd_bh);
						gfs2_log_lock(sdp);
					}
					brelse(bd1->bd_bh);
					continue;
				}
				continue;
			} else if (bd1->bd_bh) { /* A journaled buffer */
				int magic;
				gfs2_log_unlock(sdp);
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					sdp->sd_log_num_hdrs++;
					ld = (struct gfs2_log_descriptor *)
								bh->b_data;
					ptr = (__be64 *)(bh->b_data + offset);
					ld->ld_header.mh_magic =
						cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type =
						cpu_to_be32(GFS2_METATYPE_LD);
					ld->ld_header.mh_format =
						cpu_to_be32(GFS2_FORMAT_LD);
					ld->ld_type =
						cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0,
					       sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bd1->bd_bh);
				if (unlikely(magic != 0))
					set_buffer_escaped(bd1->bd_bh);
				gfs2_log_lock(sdp);
				if (n++ > num)
					break;
			} else if (!bd1->bd_bh) {
				total_dbuf--;
				sdp->sd_log_num_databuf--;
				list_del_init(&bd1->bd_le.le_list);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				kmem_cache_free(gfs2_bufdata_cachep, bd1);
			}
		}
		gfs2_log_unlock(sdp);
		if (bh) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}
		n = 0;
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			if (!bd2->bd_bh)
				continue;
			/* copy buffer if it needs escaping */
			gfs2_log_unlock(sdp);
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;
				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data,
				       kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(kaddr, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else {
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			}
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del_init(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		bh = bd1->bd_bh;
		if (bh) {
			bh->b_private = NULL;
			get_bh(bh);
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
		} else
			gfs2_log_unlock(sdp);

		kmem_cache_free(gfs2_bufdata_cachep, bd1);
	}

	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}
/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl, DIO_START | DIO_WAIT);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl, DIO_START | DIO_WAIT);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}
static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}
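/*
 * These tables are how the generic log code reaches the per-type hooks
 * above; hooks left unset are simply skipped by the callers (an
 * assumption based on the usual lops calling convention, since those
 * callers live elsewhere).
 */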
const struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
	.lo_name = "glock",
};

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};
const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_glock_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	&gfs2_rg_lops,
	&gfs2_databuf_lops,
	NULL,
};