 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota-check
 * program to be run after node crashes or anything like that.
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can sit
 * on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
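 *
 * As an illustrative sketch of the scaling (the exact bound depends on the
 * details of need_sync() below): with a limit of 1000 blocks, a synced value
 * of 900, four journals (nodes), and the default quota_scale of one, a node
 * would request a sync once its local unsynced change reaches roughly
 * (1000 - 900) / 4 = 25 blocks; a larger quota_scale shrinks that threshold,
 * a smaller one grows it.
 *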
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so that it does not have to be read from disk constantly.
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include "ops_address.h"
struct gfs2_quota_host {
struct gfs2_quota_change_host {
	u32 qc_flags; /* GFS2_QCF_... */
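
/*
 * These "host" structs are in-core, native-endian shadows of the on-disk,
 * big-endian struct gfs2_quota and struct gfs2_quota_change; the conversions
 * happen in gfs2_quota_in()/gfs2_quota_out() and gfs2_quota_change_in()
 * further down in this file.
 */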

static u64 qd2offset(struct gfs2_quota_data *qd)
	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);
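
/*
 * The arithmetic above implies the following quota-file layout, with the
 * user and group quotas for each ID interleaved (user entry first):
 *
 *	user  0 -> offset 0 * sizeof(struct gfs2_quota)
 *	group 0 -> offset 1 * sizeof(struct gfs2_quota)
 *	user  1 -> offset 2 * sizeof(struct gfs2_quota)
 *	group 1 -> offset 3 * sizeof(struct gfs2_quota)
 */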

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
	struct gfs2_quota_data *qd;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
		set_bit(QDF_USER, &qd->qd_flags);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);

static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (qd->qd_id == id &&
		    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
	spin_unlock(&sdp->sd_quota_spin);
			gfs2_lvb_unhold(new_qd->qd_gl);
		error = qd_alloc(sdp, user, id, &new_qd);

static void qd_hold(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	spin_unlock(&sdp->sd_quota_spin);

static void qd_put(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);

static int slot_get(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];

	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))

	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
	if (qd->qd_slot >= sdp->sd_quota_slots)

	sdp->sd_quota_bitmap[c][o] |= 1 << b;
	spin_unlock(&sdp->sd_quota_spin);

	spin_unlock(&sdp->sd_quota_spin);
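
/*
 * Bitmap geometry for the slot allocator above: sd_quota_chunks pages of
 * PAGE_SIZE bytes each, 8 slots per byte, so chunk c, byte o, bit b is slot
 * c * (8 * PAGE_SIZE) + o * 8 + b, exactly as computed in slot_get().  With
 * 4K pages, for example, a single chunk tracks 32768 quota-change slots.
 */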

static void slot_hold(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	spin_unlock(&sdp->sd_quota_spin);

static void slot_put(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
	spin_unlock(&sdp->sd_quota_spin);

static int bh_get(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;
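	/*
	 * Each block of the quota-change file holds sd_qc_per_block entries,
	 * so the slot number splits into a block index and an offset within
	 * that block; e.g. with 60 entries per block (an illustrative figure),
	 * slot 125 would be entry 5 of block 2.
	 */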
	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, 0, &bh_map);
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))

	qd->qd_bh_qc = (struct gfs2_quota_change *)
		       (bh->b_data + sizeof(struct gfs2_meta_header) +
			offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);
	mutex_unlock(&sdp->sd_quota_mutex);

static void bh_put(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
	mutex_unlock(&sdp->sd_quota_mutex);

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
	struct gfs2_quota_data *qd = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);

	spin_unlock(&sdp->sd_quota_spin);

		gfs2_assert_warn(sdp, qd->qd_change_sync);
			clear_bit(QDF_LOCKED, &qd->qd_flags);
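
/*
 * qd_fish() above and qd_trylock() below both stake a claim on a dirty
 * quota-data entry by setting QDF_LOCKED before syncing it; qd_unlock()
 * drops that claim once the sync attempt is finished.
 */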

static int qd_trylock(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
		clear_bit(QDF_LOCKED, &qd->qd_flags);

static void qd_unlock(struct gfs2_quota_data *qd)
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
	error = qd_get(sdp, user, id, create, qdp);
	error = slot_get(*qdp);
	error = bh_get(*qdp);

static void qdsb_put(struct gfs2_quota_data *qd)

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);

	gfs2_quota_unhold(ip);

void gfs2_quota_unhold(struct gfs2_inode *ip)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
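
/*
 * An inode holds at most four quota-data entries at once: the owning user
 * and group IDs, plus (for a chown/chgrp in flight) the new user and group
 * IDs.  That is why gfs2_quota_unlock() below gets away with a fixed
 * qda[4] array.
 */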

static int sort_qd(const void *a, const void *b)
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
	if (qd_a->qd_id < qd_b->qd_id)
	if (qd_a->qd_id > qd_b->qd_id)
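
/*
 * sort_qd() orders user quotas ahead of group quotas and then sorts by ID.
 * do_sync() and gfs2_quota_lock() both sort their arrays with it before
 * acquiring the per-ID glocks, so every node acquires multiple quota glocks
 * in the same order, which should rule out ABBA deadlocks between nodes.
 */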

static void do_qc(struct gfs2_quota_data *qd, s64 change)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	spin_unlock(&sdp->sd_quota_spin);

		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {

	mutex_unlock(&sdp->sd_quota_mutex);
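
/*
 * QDF_CHANGE tracks whether this ID currently owns a slot in the local
 * quota-change file: the first nonzero change sets the bit (taking extra
 * slot and qd references in the branch above), and a later do_qc() that
 * brings the accumulated change back to zero clears it and releases them
 * again.
 */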

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
	const struct gfs2_quota *str = buf;

	qu->qu_limit = be64_to_cpu(str->qu_limit);
	qu->qu_warn = be64_to_cpu(str->qu_warn);
	qu->qu_value = be64_to_cpu(str->qu_value);
	qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
	struct gfs2_quota *str = buf;

	str->qu_limit = cpu_to_be64(qu->qu_limit);
	str->qu_warn = cpu_to_be64(qu->qu_warn);
	str->qu_value = cpu_to_be64(qu->qu_value);
	str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));

/*
 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 * in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct gfs2_quota_host qp;

	if (gfs2_is_stuffed(ip)) {
		struct gfs2_alloc *al = NULL;
		al = gfs2_alloc_get(ip);
		/* just request 1 block */
		al->al_requested = 1;
		gfs2_inplace_reserve(ip);
		gfs2_unstuff_dinode(ip, NULL);
		gfs2_inplace_release(ip);

	page = grab_cache_page(mapping, index);

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	while (offset >= pos) {
		bh = bh->b_this_page;

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		if (!buffer_uptodate(bh))

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	gfs2_quota_in(&qp, ptr);
	qp.qu_value += change;
	gfs2_quota_out(&qp, ptr);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
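	/*
	 * The assignments above mirror the new value into the glock's Lock
	 * Value Block, so other nodes can read the current usage from the
	 * LVB instead of rereading the quota file (see the comment at the
	 * top of this file).
	 */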
	page_cache_release(page);

static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	struct gfs2_quota_data *qd;
	unsigned int nalloc = 0;
	struct gfs2_alloc *al = NULL;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
					   GL_NOCACHE, &ghs[qx]);

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),

		al = gfs2_alloc_get(ip);
		al->al_requested = nalloc * (data_blocks + ind_blocks);

		error = gfs2_inplace_reserve(ip);

		error = gfs2_trans_begin(sdp,
					 al->al_rgd->rd_length +
					 num_qd * data_blocks +
					 nalloc * ind_blocks +
					 RES_DINODE + num_qd +
		error = gfs2_trans_begin(sdp,
					 num_qd * data_blocks +
					 RES_DINODE + num_qd, 0);

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
					  (struct gfs2_quota_data *)
		do_qc(qd, -qd->qd_change_sync);
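		/*
		 * Once the change has been folded into the shared quota
		 * file, do_qc() subtracts the same amount from this node's
		 * local quota-change file so it is not applied twice.
		 */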
	gfs2_inplace_release(ip);
	gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(&ghs[qx]);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	struct file_ra_state ra_state;
	struct gfs2_quota_lvb *qlvb;

	file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);

		memset(buf, 0, sizeof(struct gfs2_quota));

		error = gfs2_internal_read(ip, &ra_state, buf,
					   &pos, sizeof(struct gfs2_quota));

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);

	gfs2_glock_dq_uninit(&i_gh);
	gfs2_glock_dq_uninit(q_gh);

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);

	set_bit(GIF_QD_LOCKED, &ip->i_flags);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
	gfs2_quota_unhold(ip);

static int need_sync(struct gfs2_quota_data *qd)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int num, den;

	if (!qd->qd_qb.qb_limit)

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))

		value *= gfs2_jindex_size(sdp) * num;
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
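
/*
 * In other words (assuming the elided lines divide by "den", as the
 * quota_scale knob described at the top of this file implies), a sync is
 * wanted roughly when
 *
 *	qd_change * num_journals * (num / den) + synced_value >= limit
 *
 * so the nearer the synced value is to the limit, the smaller the local
 * change that triggers a sync.
 */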

void gfs2_quota_unlock(struct gfs2_inode *ip)
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;

		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))

	for (x = 0; x < count; x++)

	gfs2_quota_unhold(ip);

static int print_message(struct gfs2_quota_data *qd, char *type)
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",

int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)

	for (x = 0; x < al->al_qd_num; x++) {
		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&sdp->sd_quota_spin);
		value += qd->qd_change;
		spin_unlock(&sdp->sd_quota_spin);

		if (be64_to_cpu(qd->qd_qb.qb_limit) &&
		    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period) * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int found = 0;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)

	for (x = 0; x < al->al_qd_num; x++) {
		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {

int gfs2_quota_sync(struct gfs2_sbd *sdp)
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);

		error = qd_fish(sdp, qda + num_qd);
		if (error || !qda[num_qd])
		if (++num_qd == max_qd)

			error = do_sync(num_qd, qda);
			for (x = 0; x < num_qd; x++)
				qda[x]->qd_sync_gen =
					sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
	} while (!error && num_qd == max_qd);

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;

	error = qd_get(sdp, user, id, CREATE, &qd);

	error = do_glock(qd, FORCE, &q_gh);
		gfs2_glock_dq_uninit(&q_gh);

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);

int gfs2_quota_init(struct gfs2_sbd *sdp)
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;

	if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
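	/*
	 * A worked sizing example (all numbers illustrative): a 1 MB
	 * quota-change file in 4K blocks is 256 blocks; at, say, 60 entries
	 * per block that is 256 * 60 = 15360 slots, and with 4K pages
	 * DIV_ROUND_UP(15360, 8 * 4096) = 1 bitmap chunk.
	 */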
	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_KERNEL);
	if (!sdp->sd_quota_bitmap)

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_quota_bitmap[x])

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;

		error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);

		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);
	fs_info(sdp, "found %u quota changes\n", found);

	gfs2_quota_cleanup(sdp);

void gfs2_quota_scan(struct gfs2_sbd *sdp)
	struct gfs2_quota_data *qd, *safe;

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);
	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			spin_lock(&sdp->sd_quota_spin);

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);

	spin_lock(&sdp->sd_quota_spin);
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);