/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/crc32.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/lm_interface.h>
/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 */
void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list, *head;
	struct gfs2_jdesc *jd;
	struct gfs2_journal_extent *jext;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		head = &jd->extent_list;
		while (!list_empty(head)) {
			jext = list_entry(head->next,
					  struct gfs2_journal_extent,
					  extent_list);
			list_del(&jext->extent_list);
			kfree(jext);
		}
		list_del(&jd->jd_list);
		kfree(jd);
	}
}
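
/*
 * Look up a journal descriptor by journal ID. Both callers below take
 * sd_jindex_spin around this helper, so the list walk itself is unlocked.
 */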
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}
struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}
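
/*
 * Mark a journal as needing attention. The flag is picked up again by
 * gfs2_jdesc_find_dirty() below.
 */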
void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	if (jd)
		jd->jd_dirty = 1;
	spin_unlock(&sdp->sd_jindex_spin);
}
struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp)
{
	struct gfs2_jdesc *jd, *found = NULL;

	spin_lock(&sdp->sd_jindex_spin);

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_dirty) {
			jd->jd_dirty = 0;
			found = jd;
			break;
		}
	}
	spin_unlock(&sdp->sd_jindex_spin);

	return found;
}
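
/*
 * Sanity-check a journal: it must be between 8MB and 1GB, a whole number
 * of filesystem blocks, and fully allocated (no unwritten holes).
 */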
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	int ar;
	int error;

	if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
	    (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;

	error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar);
	if (!error && ar) {
		gfs2_consist_inode(ip);
		error = -EIO;
	}

	return error;
}
/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */
int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder t_gh;
	struct gfs2_log_header_host head;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
	if (error)
		return error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
	if (error)
		goto fail;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialize some head of the log stuff */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error)
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
	gfs2_glock_dq_uninit(&t_gh);
	return 0;

fail:
	t_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&t_gh);
	return error;
}
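
/*
 * Helpers for converting struct gfs2_statfs_change between its on-disk
 * big-endian form and the in-core host-endian copy kept in the superblock.
 */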
static void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}
static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}
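
/*
 * Read the initial statfs values (normally at mount time). The master
 * statfs file holds the filesystem-wide totals; each node also keeps a
 * local statfs file of pending deltas, which spectator (read-only)
 * mounts skip since they never write.
 */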
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
		if (error)
			goto out_m_bh;

		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, l_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);

		brelse(l_bh);
	}

out_m_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return 0;
}
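
/*
 * Apply a delta to this node's local statfs counters. The caller is
 * expected to have a transaction open, since the local statfs buffer is
 * added to it via gfs2_trans_add_bh().
 */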
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *l_bh;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
}
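
/*
 * Fold the accumulated local deltas into the master statfs file and zero
 * the local copy, both under an exclusive lock on the master inode.
 */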
int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	spin_unlock(&sdp->sd_statfs_spin);

	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the statfs change structure to fill in
 *
 * Returns: errno
 */
int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
		sc->sc_dinodes = 0;

	return 0;
}
/**
 * statfs_slow_fill - fill in the sc structure for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 */
static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}
/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */
int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_holder ri_gh;
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;

	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		goto out;

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gh->gh_gl && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error)
						error = statfs_slow_fill(
							gh->gh_gl->gl_object, sc);
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gh->gh_gl)
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC, gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	gfs2_glock_dq_uninit(&ri_gh);

out:
	kfree(gha);
	return error;
}
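
/*
 * One of these per journal, used by gfs2_lock_fs_check_clean() below to
 * keep track of the shared glock it holds on each journal inode.
 */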
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};
/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Returns: errno
 */
static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
				    struct gfs2_holder *t_gh)
{
	struct gfs2_inode *ip;
	struct gfs2_holder ji_gh;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	error = gfs2_jindex_hold(sdp, &ji_gh);
	if (error)
		return error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
				   GL_NOCACHE, t_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_glock_dq_uninit(t_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_entry(list.next, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	gfs2_glock_dq_uninit(&ji_gh);
	return error;
}
/**
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and metadata for all machines by
 * acquiring the transaction log exclusively. All journals are
 * ensured to be in a clean state as well.
 *
 * Returns: errno
 */
int gfs2_freeze_fs(struct gfs2_sbd *sdp)
{
	int error = 0;

	mutex_lock(&sdp->sd_freeze_lock);
	if (!sdp->sd_freeze_count++) {
		error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
		if (error)
			sdp->sd_freeze_count--;
	}
	mutex_unlock(&sdp->sd_freeze_lock);

	return error;
}
/**
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock. Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 */
void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_freeze_lock);

	if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

	mutex_unlock(&sdp->sd_freeze_lock);
}