/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

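/**
 * glock_lo_add - Add a glock to the transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the glock
 *
 * Takes a reference on the glock, marks it dirty and links it onto the
 * log's list of glocks. The glock must be held in the exclusive state
 * by the current transaction.
 */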
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}

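/**
 * glock_lo_after_commit - Release the glocks pinned by a committed transaction
 * @sdp: the filesystem
 * @ai: the AIL structure for this commit (unused for glocks)
 *
 * Empties the log's glock list, dropping the reference that
 * glock_lo_add() took on each entry.
 */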
static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}

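/**
 * buf_lo_add - Add a metadata buffer to the current transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the gfs2_bufdata
 *
 * Links the buffer into the transaction's list, pins it in memory
 * until the log has been flushed, and queues it on the log's list of
 * metadata buffers.
 */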
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	if (!list_empty(&bd->bd_list_tr))
		return;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);

	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);

	tr->tr_num_buf_new++;
}

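/**
 * buf_lo_incore_commit - Drain the transaction's buffer list at commit time
 * @sdp: the filesystem
 * @tr: the transaction being committed
 */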
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

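/**
 * buf_lo_before_commit - Write the queued metadata buffers into the log
 * @sdp: the filesystem
 *
 * Buffers are written in chunks of at most "limit" blocks, each chunk
 * preceded by a log descriptor that lists the real block number of
 * every buffer in the chunk.
 */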
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	offset += sizeof(__be64) - 1;
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		bh = gfs2_log_get_buf(sdp);
		sdp->sd_log_num_hdrs++;
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}

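/**
 * buf_lo_after_commit - Unpin the metadata buffers once the log is on disk
 * @sdp: the filesystem
 * @ai: the AIL list onto which the unpinned buffers are moved
 */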
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

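/**
 * buf_lo_before_scan - Reset the replay counters before scanning a journal
 * @jd: the journal being replayed
 * @head: the journal's log header
 * @pass: the counters are only reset on pass 0
 */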
static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

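/**
 * buf_lo_scan_elements - Replay the metadata blocks named by a log descriptor
 * @jd: the journal being replayed
 * @start: the log block holding the descriptor
 * @ld: the descriptor itself
 * @ptr: the array of block numbers that follows the descriptor header
 * @pass: only acted upon when pass == 1
 *
 * Each logged block is copied back into place unless a later revoke
 * covers it.
 */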
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

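/**
 * buf_lo_after_scan - Sync the replayed metadata and report the result
 * @jd: the journal that was replayed
 * @error: non-zero if the scan failed
 * @pass: the pass that just finished
 */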
static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

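/**
 * revoke_lo_add - Add a revoke to the current transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the gfs2_revoke
 */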
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;

	gfs2_log_lock(sdp);
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
	gfs2_log_unlock(sdp);
}

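/**
 * revoke_lo_before_commit - Write the pending revokes into the log
 * @sdp: the filesystem
 *
 * The revokes are packed as __be64 block numbers behind a single log
 * descriptor; when one log block fills up, a continuation block tagged
 * GFS2_METATYPE_LB is started.
 */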
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_revoke *rv;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_log_get_buf(sdp);
	ld = (struct gfs2_log_descriptor *)bh->b_data;
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(u64)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	ld->ld_data2 = cpu_to_be32(0);
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
		list_del_init(&rv->rv_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
		kfree(rv);

		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	set_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
}

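/**
 * revoke_lo_before_scan - Prepare to collect revokes during journal replay
 * @jd: the journal being replayed
 * @head: the journal's log header
 * @pass: the revoke state is only reset on pass 0
 */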
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

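/**
 * rg_lo_add - Add a resource group to the current transaction
 * @sdp: the filesystem
 * @le: the log element embedded in the gfs2_rgrpd
 *
 * Holds the rgrp's buffers until the transaction has been committed.
 */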
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);
	gfs2_rgrp_bh_hold(rgd);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per meta data)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	tr->tr_touched = 1;
	if (list_empty(&bd->bd_list_tr) &&
	    (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
		tr->tr_num_buf++;
		list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_buf_new++;
	}
	gfs2_trans_add_gl(bd->bd_gl);
	gfs2_log_lock(sdp);
	if (list_empty(&le->le_list)) {
		if (ip->i_di.di_flags & GFS2_DIF_JDATA)
			sdp->sd_log_num_jdata++;
		sdp->sd_log_num_databuf++;
		list_add(&le->le_list, &sdp->sd_log_le_databuf);
	}
	gfs2_log_unlock(sdp);
}

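/**
 * gfs2_check_magic - Check whether a data block begins with GFS2_MAGIC
 * @bh: the buffer to test
 *
 * A journaled data block that happens to start with the magic number
 * could be mistaken for metadata during replay, so such a block must
 * be escaped (its first word zeroed) before it is written to the log.
 */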
static int gfs2_check_magic(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	void *kaddr;
	__be32 *ptr;
	int rv = 0;

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		rv = 1;
	kunmap_atomic(kaddr, KM_USER0);

	return rv;
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 * Here we scan through the lists of buffers and make the assumption
 * that any buffer that's been pinned is being journaled, and that
 * any unpinned buffer is an ordered write data buffer and therefore
 * will be written back rather than journaled.
 */
static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	LIST_HEAD(started);
	struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
	struct buffer_head *bh = NULL,*bh1 = NULL;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	struct gfs2_log_descriptor *ld;
	unsigned int limit;
	unsigned int total_dbuf = sdp->sd_log_num_databuf;
	unsigned int total_jdata = sdp->sd_log_num_jdata;
	unsigned int num, n;
	__be64 *ptr = NULL;

	offset += 2*sizeof(__be64) - 1;
	offset &= ~(2*sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);

	/*
	 * Start writing ordered buffers, write journaled buffers
	 * into the log along with a header
	 */
	gfs2_log_lock(sdp);
	bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
				       bd_le.le_list);
	while(total_dbuf) {
		num = total_jdata;
		if (num > limit)
			num = limit;
		n = 0;
		list_for_each_entry_safe_continue(bd1, bdt,
						  &sdp->sd_log_le_databuf,
						  bd_le.le_list) {
			/* store off the buffer head in a local ptr since
			 * gfs2_bufdata might change when we drop the log lock
			 */
			bh1 = bd1->bd_bh;

			/* An ordered write buffer */
			if (bh1 && !buffer_pinned(bh1)) {
				list_move(&bd1->bd_le.le_list, &started);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
							&sdp->sd_log_le_databuf,
							bd_le.le_list);
				}
				total_dbuf--;
				if (bh1) {
					if (buffer_dirty(bh1)) {
						get_bh(bh1);

						gfs2_log_unlock(sdp);

						ll_rw_block(SWRITE, 1, &bh1);
						brelse(bh1);

						gfs2_log_lock(sdp);
					}
					continue;
				}
				continue;
			} else if (bh1) { /* A journaled buffer */
				int magic;
				gfs2_log_unlock(sdp);
				if (!bh) {
					bh = gfs2_log_get_buf(sdp);
					sdp->sd_log_num_hdrs++;
					ld = (struct gfs2_log_descriptor *)
					     bh->b_data;
					ptr = (__be64 *)(bh->b_data + offset);
					ld->ld_header.mh_magic =
						cpu_to_be32(GFS2_MAGIC);
					ld->ld_header.mh_type =
						cpu_to_be32(GFS2_METATYPE_LD);
					ld->ld_header.mh_format =
						cpu_to_be32(GFS2_FORMAT_LD);
					ld->ld_type =
						cpu_to_be32(GFS2_LOG_DESC_JDATA);
					ld->ld_length = cpu_to_be32(num + 1);
					ld->ld_data1 = cpu_to_be32(num);
					ld->ld_data2 = cpu_to_be32(0);
					memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
				}
				magic = gfs2_check_magic(bh1);
				*ptr++ = cpu_to_be64(bh1->b_blocknr);
				*ptr++ = cpu_to_be64((__u64)magic);
				clear_buffer_escaped(bh1);
				if (unlikely(magic != 0))
					set_buffer_escaped(bh1);
				gfs2_log_lock(sdp);
				if (n++ > num)
					break;
			} else {
				total_dbuf--;
				sdp->sd_log_num_databuf--;
				list_del_init(&bd1->bd_le.le_list);
				if (bd1 == bd2) {
					bd2 = NULL;
					bd2 = list_prepare_entry(bd2,
						&sdp->sd_log_le_databuf,
						bd_le.le_list);
				}
				kmem_cache_free(gfs2_bufdata_cachep, bd1);
			}
		}
		gfs2_log_unlock(sdp);
		if (bh) {
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			bh = NULL;
		}
		n = 0;
		gfs2_log_lock(sdp);
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
					     bd_le.le_list) {
			if (!bd2->bd_bh)
				continue;
			/* copy buffer if it needs escaping */
			gfs2_log_unlock(sdp);
			if (unlikely(buffer_escaped(bd2->bd_bh))) {
				void *kaddr;
				struct page *page = bd2->bd_bh->b_page;
				bh = gfs2_log_get_buf(sdp);
				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(bh->b_data,
				       kaddr + bh_offset(bd2->bd_bh),
				       sdp->sd_sb.sb_bsize);
				kunmap_atomic(kaddr, KM_USER0);
				*(__be32 *)bh->b_data = 0;
			} else
				bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);

			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}
		bh = NULL;
		total_dbuf -= num;
		total_jdata -= num;
	}
	gfs2_log_unlock(sdp);

	/* Wait on all ordered buffers */
	while (!list_empty(&started)) {
		gfs2_log_lock(sdp);
		bd1 = list_entry(started.next, struct gfs2_bufdata,
				 bd_le.le_list);
		list_del_init(&bd1->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		bh = bd1->bd_bh;
		if (bh) {
			bh->b_private = NULL;
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			brelse(bh);
		} else
			gfs2_log_unlock(sdp);

		kmem_cache_free(gfs2_bufdata_cachep, bd1);
	}

	/* We've removed all the ordered write bufs here, so only jdata left */
	gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
}

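/**
 * databuf_lo_scan_elements - Replay the journaled data blocks
 * @jd: the journal being replayed
 * @start: the log block holding the descriptor
 * @ld: the descriptor itself
 * @ptr: the (block number, escape flag) tag pairs after the header
 * @pass: only acted upon when pass == 1
 *
 * Works like buf_lo_scan_elements(), except that a block whose tag has
 * the escape flag set gets its leading magic number restored.
 */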
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		sdp->sd_log_num_jdata--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
}

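/*
 * The operation vectors below tie each log element type to the
 * callbacks above; gfs2_log_ops is the NULL-terminated master list
 * used to iterate over every log operation type.
 */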
const struct gfs2_log_operations gfs2_glock_lops = {
	.lo_add = glock_lo_add,
	.lo_after_commit = glock_lo_after_commit,
	.lo_name = "glock",
};

const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_glock_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	&gfs2_rg_lops,
	&gfs2_databuf_lops,
	NULL,
};