/*
 *  Implementation of operations over global quota file
 */

#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

19 #include "blockcheck.h"
static struct workqueue_struct *ocfs2_quota_wq = NULL;

static void qsync_work_fn(struct work_struct *work);

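/* Copy limits and usage from an on-disk global dquot entry into the
 * in-memory dquot, skipping fields the administrator has set locally. */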
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

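/* Fill an on-disk global dquot entry from the in-memory dquot. */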
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(dquot->dq_id);
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
}

static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;
	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

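/* Format callbacks handed to the generic quota tree code (dqblk_qtree). */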
struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};

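/* Check the metadata ECC stored in the quota block trailer. */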
static int ocfs2_validate_quota_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct ocfs2_disk_dqtrailer *dqt =
		ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

	mlog(0, "Validating quota block %llu\n",
	     (unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}

int ocfs2_read_quota_block(struct inode *inode, u64 v_block,
			   struct buffer_head **bh)
{
	int rc = 0;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_virt_blocks(inode, v_block, 1, &tmp, 0,
				    ocfs2_validate_quota_block);
	if (rc)
		mlog_errno(rc);

	/* If ocfs2_read_virt_blocks() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}

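/* Map a logical block of the quota file to disk and grab a buffer head for
 * it without reading it (used when the whole block is about to be written). */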
static int ocfs2_get_quota_block(struct inode *inode, int block,
				 struct buffer_head **bh)
{
	u64 pblock, pcount;
	int err;

	down_read(&OCFS2_I(inode)->ip_alloc_sem);
	err = ocfs2_extent_map_get_blocks(inode, block, &pblock, &pcount, NULL);
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
	if (err) {
		mlog_errno(err);
		return err;
	}
	*bh = sb_getblk(inode->i_sb, pblock);
	if (!*bh) {
		err = -EIO;
		mlog_errno(err);
	}
	return err;
}

/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;

	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
		bh = NULL;
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0, ja_type;
	struct buffer_head *bh = NULL;
	handle_t *handle = journal_current_handle();

	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	mutex_lock_nested(&gqinode->i_mutex, I_MUTEX_QUOTA);
	if (gqinode->i_size < off + len) {
		down_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		err = ocfs2_extend_no_holes(gqinode, off + len, off);
		up_write(&OCFS2_I(gqinode)->ip_alloc_sem);
		if (err < 0)
			goto out;
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       off + len);
		if (err < 0)
			goto out;
		new = 1;
	}
	/* Not rewriting whole block? */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		err = ocfs2_read_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
	} else {
		err = ocfs2_get_quota_block(gqinode, blk, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
	}
	if (err) {
		mlog_errno(err);
		goto out;
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(gqinode, bh);
	err = ocfs2_journal_access_dq(handle, gqinode, bh, ja_type);
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	err = ocfs2_journal_dirty(handle, bh);
	brelse(bh);
out:
	if (err) {
		mutex_unlock(&gqinode->i_mutex);
		mlog_errno(err);
		return err;
	}
	gqinode->i_version++;
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	mutex_unlock(&gqinode->i_mutex);
	return len;
}

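/* Take the cluster lock on the global quota file inode. Calls can nest;
 * dqi_gqi_count tracks users of the cached inode buffer head. */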
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}

/* Read information header from global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	int status;

	mlog_entry_void();

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
			OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
			type);
		status = -EINVAL;
		goto out_err;
	}
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_unlock_global_qf(oinfo, 0);
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		mlog_errno(status);
		goto out_err;
	}
	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_syncjiff = msecs_to_jiffies(oinfo->dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
						OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);

	status = 0;
out_err:
	mlog_exit(status);
	return status;
}

/* Write information to global quota file. Expects exclusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		mlog_errno(size);
		return size;
	}
	return 0;
}

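/* Write the global quota info header under the quota info cluster lock. */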
int ocfs2_global_write_info(struct super_block *sb, int type)
{
	int err;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 1);
	if (err < 0)
		return err;
	err = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(info, 1);
	return err;
}

/* Read in information from global quota file and acquire a reference to it.
 * dquot_acquire() has already started the transaction and locked quota file */
int ocfs2_global_read_dquot(struct dquot *dquot)
{
	int err, err2, ex = 0;
	struct ocfs2_mem_dqinfo *info =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	err = ocfs2_qinfo_lock(info, 0);
	if (err < 0)
		goto out;
	err = qtree_read_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	OCFS2_DQUOT(dquot)->dq_use_count++;
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	if (!dquot->dq_off) {	/* No real quota entry? */
		/* Upgrade to exclusive lock for allocation */
		err = ocfs2_qinfo_lock(info, 1);
		if (err < 0)
			goto out_qlock;
		ex = 1;
	}
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(dquot->dq_sb, dquot->dq_type))) {
		err2 = __ocfs2_global_write_info(dquot->dq_sb, dquot->dq_type);
		if (!err)
			err = err2;
	}
out_qlock:
	if (ex)
		ocfs2_qinfo_unlock(info, 1);
	ocfs2_qinfo_unlock(info, 0);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time_t olditime, oldbtime;

	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0) {
			mlog(ML_ERROR, "Short read from global quota file "
				       "(%u read)\n", err);
			err = -EIO;
		}
		goto out;
	}

	/* Update space and inode usage. Get also other information from
	 * global quota file so that we don't overwrite any changes there. */
	spin_lock(&dq_data_lock);
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
	     dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
	     dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set properly space grace time... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set properly inode grace time... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dq_data_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
			       " (type=%d, id=%u)\n", dquot->dq_type,
			       (unsigned)dquot->dq_id);
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}

/*
 *  Functions for periodic syncing of dquots with global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
		   dquot->dq_type, type, sb->s_id);
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	dquot_mark_dquot_dirty(dquot);
	status = dquot_commit(dquot);
	if (status < 0)
		mlog_errno(status);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

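/* Delayed work: push locally accumulated quota usage to the global file for
 * all active dquots and re-arm the work item. */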
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo = container_of(work,
						      struct ocfs2_mem_dqinfo,
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work,
			   oinfo->dqi_syncjiff);
}

/*
 *  Wrappers for generic quota functions
 */

static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	status = dquot_commit(dquot);
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(status);
	return status;
}

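/* Journal credits needed to drop a dquot from the global and local files. */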
int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	/* We modify tree, leaf block, global info, local chunk header,
	 * global and local inode */
	return oinfo->dqi_gi.dqi_qtree_depth + 2 + 1 +
	       2 * OCFS2_INODE_UPDATE_CREDITS;
}

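/* Release the on-disk structures of an unused dquot under the global quota
 * file lock, inside a transaction sized by ocfs2_calc_qdel_credits(). */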
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_release(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

int ocfs2_calc_qinit_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo;
	int features[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
				    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA };
	struct ocfs2_dinode *lfe, *gfe;

	if (!OCFS2_HAS_RO_COMPAT_FEATURE(sb, features[type]))
		return 0;

	oinfo = sb_dqinfo(sb, type)->dqi_priv;
	gfe = (struct ocfs2_dinode *)oinfo->dqi_gqi_bh->b_data;
	lfe = (struct ocfs2_dinode *)oinfo->dqi_lqi_bh->b_data;
	/* We can extend local file + global file. In local file we
	 * can modify info, chunk header block and dquot block. In
	 * global file we can modify info, tree and leaf block */
	return ocfs2_calc_extend_credits(sb, &lfe->id2.i_list, 0) +
	       ocfs2_calc_extend_credits(sb, &gfe->id2.i_list, 0) +
	       3 + oinfo->dqi_gi.dqi_qtree_depth + 2;
}

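/* Read (and possibly create) the global entry for a dquot inside a
 * transaction sized by ocfs2_calc_qinit_credits(). */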
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);
	/* We need an exclusive lock, because we're going to update use count
	 * and instantiate possibly new dquot structure */
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qinit_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_acquire(dquot);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

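/* If the administrator changed limits, sync the dquot to the global file
 * right away; otherwise only the local structure is written. */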
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	mlog_entry("id=%u, type=%d", dquot->dq_id, type);
	dquot_mark_dquot_dirty(dquot);

	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	if (!sync) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	/* Now write updated local dquot structure */
	status = dquot_commit(dquot);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	mlog_entry_void();

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mlog_exit(status);
	return status;
}

/* This is difficult. We have to lock quota inode and start transaction
 * in this function but we don't want to take the penalty of an exclusive
 * quota file lock when we are just going to use cached structures. So
 * we just take the read lock, check whether we have the dquot cached, and
 * if so, we don't have to take the write lock... */
static int ocfs2_dquot_initialize(struct inode *inode, int type)
{
	handle_t *handle = NULL;
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;
	int cnt;
	qid_t id;

	mlog_entry_void();

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		/* This is just a performance optimization not a reliable test.
		 * Since we hold an inode lock, no one can actually release
		 * the structure until we are finished with initialization. */
		if (inode->i_dquot[cnt] != NODQUOT) {
			ocfs2_unlock_global_qf(oinfo, 0);
			continue;
		}
		/* When we have inode lock, we know that no dquot_release() can
		 * run and thus we can safely check whether we need to
		 * read+modify global file to get quota information or whether
		 * our node already has it. */
		if (cnt == USRQUOTA)
			id = inode->i_uid;
		else if (cnt == GRPQUOTA)
			id = inode->i_gid;
		else
			BUG();
		/* Obtain exclusion from quota off... */
		down_write(&sb_dqopt(sb)->dqptr_sem);
		exclusive = !dquot_is_cached(sb, id, cnt);
		up_write(&sb_dqopt(sb)->dqptr_sem);
		if (exclusive) {
			status = ocfs2_lock_global_qf(oinfo, 1);
			if (status < 0) {
				exclusive = 0;
				mlog_errno(status);
				goto out_ilock;
			}
			handle = ocfs2_start_trans(OCFS2_SB(sb),
					ocfs2_calc_qinit_credits(sb, cnt));
			if (IS_ERR(handle)) {
				status = PTR_ERR(handle);
				mlog_errno(status);
				goto out_ilock;
			}
		}
		dquot_initialize(inode, cnt);
		if (exclusive) {
			ocfs2_commit_trans(OCFS2_SB(sb), handle);
			ocfs2_unlock_global_qf(oinfo, 1);
		}
		ocfs2_unlock_global_qf(oinfo, 0);
	}
	mlog_exit(0);
	return 0;
out_ilock:
	if (exclusive)
		ocfs2_unlock_global_qf(oinfo, 1);
	ocfs2_unlock_global_qf(oinfo, 0);
out:
	mlog_exit(status);
	return status;
}

static int ocfs2_dquot_drop_slow(struct inode *inode)
{
	int status = 0;
	int cnt;
	int got_lock[MAXQUOTAS] = {0, 0};
	handle_t *handle;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 1);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	handle = ocfs2_start_trans(OCFS2_SB(sb),
			ocfs2_calc_qinit_credits(sb, USRQUOTA) +
			ocfs2_calc_qinit_credits(sb, GRPQUOTA));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	dquot_drop(inode);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 1);
		}
	return status;
}

/* See the comment before ocfs2_dquot_initialize. */
static int ocfs2_dquot_drop(struct inode *inode)
{
	int status = 0;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_mem_dqinfo *oinfo;
	int exclusive = 0;
	int cnt;
	int got_lock[MAXQUOTAS] = {0, 0};

	mlog_entry_void();
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!sb_has_quota_active(sb, cnt))
			continue;
		oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
		status = ocfs2_lock_global_qf(oinfo, 0);
		if (status < 0)
			goto out;
		got_lock[cnt] = 1;
	}
	/* Lock against anyone releasing references so that when we check
	 * we know we are not going to be the last ones to release the dquot */
	down_write(&sb_dqopt(sb)->dqptr_sem);
	/* Urgh, this is a terrible hack :( */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (inode->i_dquot[cnt] != NODQUOT &&
		    atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
			exclusive = 1;
			break;
		}
	}
	if (!exclusive)
		dquot_drop_locked(inode);
	up_write(&sb_dqopt(sb)->dqptr_sem);
out:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (got_lock[cnt]) {
			oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
			ocfs2_unlock_global_qf(oinfo, 0);
		}
	/* In case we bailed out because we had to do expensive locking
	 * do it now... */
	if (exclusive)
		status = ocfs2_dquot_drop_slow(inode);
	mlog_exit(status);
	return status;
}

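/* Allocation and freeing of the ocfs2-specific dquot structure. */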
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot =
			kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

	if (!dquot)
		return NULL;
	return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}

struct dquot_operations ocfs2_quota_operations = {
	.initialize	= ocfs2_dquot_initialize,
	.drop		= ocfs2_dquot_drop,
	.alloc_space	= dquot_alloc_space,
	.alloc_inode	= dquot_alloc_inode,
	.free_space	= dquot_free_space,
	.free_inode	= dquot_free_inode,
	.transfer	= dquot_transfer,
	.write_dquot	= ocfs2_write_dquot,
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};

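/* Create and tear down the workqueue used for periodic quota syncing. */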
int ocfs2_quota_setup(void)
{
	ocfs2_quota_wq = create_workqueue("o2quot");
	if (!ocfs2_quota_wq)
		return -ENOMEM;
	return 0;
}

void ocfs2_quota_shutdown(void)
{
	if (ocfs2_quota_wq) {
		flush_workqueue(ocfs2_quota_wq);
		destroy_workqueue(ocfs2_quota_wq);
		ocfs2_quota_wq = NULL;
	}
}