/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */
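
/*
 * A rough sketch of how "quota_scale" enters the picture (see need_sync()
 * below for the exact arithmetic): the scale is kept as a fraction num/den,
 * and a node decides to sync its local change once
 *
 *	local_change * number_of_journals * num / den + cached_value >= limit
 *
 * i.e. once its unsynced local change, extrapolated across the whole
 * cluster, could push the ID past its limit.
 */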

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "ops_address.h"

struct gfs2_quota_host {
	u64 qu_limit;
	u64 qu_warn;
	s64 qu_value;
	u32 qu_ll_next;
};

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);
	return offset;
}
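
/*
 * The quota file thus interleaves user and group records: user ID N lives at
 * record 2N and group ID N at record 2N + 1, each record being
 * sizeof(struct gfs2_quota) bytes. For example, user 1000 occupies record
 * 2000 and group 1000 record 2001.
 */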

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	if (user)
		set_bit(QDF_USER, &qd->qd_flags);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (qd->qd_id == id &&
		    !test_bit(QDF_USER, &qd->qd_flags) == !user) {

	list_add(&qd->qd_list, &sdp->sd_quota_list);
	atomic_inc(&sdp->sd_quota_count);

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_lvb_unhold(new_qd->qd_gl);

	error = qd_alloc(sdp, user, id, &new_qd);

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_count--;
	qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);
}
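
/*
 * Quota-change slots are tracked in a bitmap made of page-sized chunks, so
 * one chunk covers 8 * PAGE_SIZE slots, and a free bit at (chunk c, byte o,
 * bit b) is slot c * (8 * PAGE_SIZE) + o * 8 + b. With 4 KiB pages, for
 * instance, chunk 0 covers slots 0..32767.
 */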
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}
	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);
	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&sdp->sd_quota_spin);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_quota_spin);
}
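
/*
 * bh_get() maps a quota-change slot to its buffer in the per-node quota
 * change file: the file block is qd_slot / sd_qc_per_block, and the record
 * within that block is qd_slot % sd_qc_per_block, located past the
 * gfs2_meta_header at the start of the block.
 */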
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);

	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);

	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))

	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	mutex_unlock(&sdp->sd_quota_mutex);

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
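
/*
 * qd_fish() picks the next quota entry that has a pending change and has not
 * yet been synced in the current sync generation, marks it QDF_LOCKED, and
 * hands it to the sync code.
 */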
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);

		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);

	clear_bit(QDF_LOCKED, &qd->qd_flags);

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);

	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);

	clear_bit(QDF_LOCKED, &qd->qd_flags);

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);

	error = slot_get(*qdp);

	error = bh_get(*qdp);

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);

	gfs2_quota_unhold(ip);

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}
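
/*
 * Comparator for arrays of gfs2_quota_data pointers: user IDs sort before
 * group IDs, then by ascending ID. Sorting before acquiring the per-ID
 * glocks (see do_sync() and gfs2_quota_lock()) gives every node one
 * canonical acquisition order, presumably to avoid ABBA deadlocks between
 * nodes locking the same set of IDs.
 */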
static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;
	return 0;
}
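
/*
 * do_qc() applies a delta to this node's quota-change record under the
 * quota mutex. QDF_CHANGE tracks whether the record currently holds a
 * nonzero change: the first change claims a slot and fills in qc_flags and
 * qc_id, and a change that brings the total back to zero releases it again.
 */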
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	qd->qd_change = x;
	spin_unlock(&sdp->sd_quota_spin);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);

		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
	const struct gfs2_quota *str = buf;

	qu->qu_limit = be64_to_cpu(str->qu_limit);
	qu->qu_warn = be64_to_cpu(str->qu_warn);
	qu->qu_value = be64_to_cpu(str->qu_value);
	qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
	struct gfs2_quota *str = buf;

	str->qu_limit = cpu_to_be64(qu->qu_limit);
	str->qu_warn = cpu_to_be64(qu->qu_warn);
	str->qu_value = cpu_to_be64(qu->qu_value);
	str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}

/*
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	char *ptr;
	struct gfs2_quota_host qp;
	s64 value;
	int err = -EIO;

	if (gfs2_is_stuffed(ip))
		gfs2_unstuff_dinode(ip, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	gfs2_quota_in(&qp, ptr);
	qp.qu_value += change;
	value = qp.qu_value;
	gfs2_quota_out(&qp, ptr);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
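
	/*
	 * Update the cached copy in qd_qb and mirror it into the glock's
	 * LVB, so other nodes can see the new value without re-reading the
	 * quota file (see the comment at the top of this file).
	 */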
	err = 0;
	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),

	al = gfs2_alloc_get(ip);

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction.
	 */
	al->al_requested = 1;
	/* +1 at the end for the block requested above for unstuffing */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;

	al->al_requested += nalloc * (data_blocks + ind_blocks);
	error = gfs2_inplace_reserve(ip);

	blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
					  (struct gfs2_quota_data *)

		do_qc(qd, -qd->qd_change_sync);
	}

	gfs2_inplace_release(ip);

	gfs2_glock_dq_uninit(&i_gh);

	gfs2_glock_dq_uninit(&ghs[qx]);

	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	loff_t pos;
	int error;
	struct gfs2_quota_lvb *qlvb;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, NULL, buf, &pos,
					   sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);
		qd->qd_qb = *qlvb;

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);
			force_refresh = 0;
			goto restart;
		}
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;
	int error;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			goto out;
	}

	set_bit(GIF_QD_LOCKED, &ip->i_flags);
	return 0;

out:
	while (x--)
		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
	gfs2_quota_unhold(ip);
	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
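		/*
		 * Extrapolate the local change across the cluster: e.g. with
		 * three journals, scale num/den = 1/1 and a local unsynced
		 * change of 100 blocks, a sync is triggered once
		 * cached_value + 300 meets or exceeds the limit.
		 */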
		value *= gfs2_jindex_size(sdp) * num;
		do_div(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	for (x = 0; x < count; x++)
		qd_unlock(qda[x]);

out:
	gfs2_quota_unhold(ip);
}

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;
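
		/*
		 * Enforcement compares the cluster-wide cached value plus
		 * this node's unsynced local change against the hard limit,
		 * and against the warning threshold with a rate limit of one
		 * message per quota_warn_period.
		 */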
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&sdp->sd_quota_spin);
		value += qd->qd_change;
		spin_unlock(&sdp->sd_quota_spin);

		if (be64_to_cpu(qd->qd_qb.qb_limit) &&
		    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}

int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;
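
	/*
	 * Sweep the dirty entries in batches of at most quota_simul_sync,
	 * writing each batch with a single transaction in do_sync();
	 * qd_sync_gen records the generation an entry was last written in,
	 * so qd_fish() skips entries already synced this pass.
	 */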
	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;
			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);
	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
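
	/*
	 * Each on-disk block holds (sb_bsize - sizeof(struct
	 * gfs2_meta_header)) / sizeof(struct gfs2_quota_change) change
	 * entries (cf. the pointer arithmetic in bh_get()), and each bitmap
	 * chunk is one page tracking 8 * PAGE_SIZE slots.
	 */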
	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;
		int new = 0;

		error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
		if (error)
			goto fail;

		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);

			found++;
		}

		brelse(bh);
	}
	fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd, *safe;
	LIST_HEAD(dead);

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);
		}
	}
	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);
	}
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			schedule();
			spin_lock(&sdp->sd_quota_spin);
			continue;
		}

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);

		spin_lock(&sdp->sd_quota_spin);
	}
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}