/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means there is more contention between the nodes for
 * the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" number greater than
 * one makes quota syncs more frequent and reduces the maximum overrun.
 * Numbers less than one (but greater than zero) make quota syncs less
 * frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "ops_file.h"
#include "ops_address.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_host {
        u64 qu_limit;
        u64 qu_warn;
        s64 qu_value;
};

struct gfs2_quota_change_host {
        u64 qc_change;
        u32 qc_flags; /* GFS2_QCF_... */
        u32 qc_id;
};

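/**
 * qd2offset - calculate an ID's byte offset in the quota file
 * @qd: the quota data
 *
 * User and group quotas for the same numeric ID are interleaved in the
 * quota file: user entries occupy even-numbered indices, group entries
 * odd-numbered ones.
 */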
static u64 qd2offset(struct gfs2_quota_data *qd)
{
        u64 offset;

        offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
        offset *= sizeof(struct gfs2_quota);

        return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
                    struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd;
        int error;

        qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
        if (!qd)
                return -ENOMEM;

        qd->qd_count = 1;
        qd->qd_id = id;
        if (user)
                set_bit(QDF_USER, &qd->qd_flags);
        qd->qd_slot = -1;

        error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
                               &gfs2_quota_glops, CREATE, &qd->qd_gl);
        if (error)
                goto fail;

        error = gfs2_lvb_hold(qd->qd_gl);
        gfs2_glock_put(qd->qd_gl);
        if (error)
                goto fail;

        *qdp = qd;

        return 0;

fail:
        kfree(qd);
        return error;
}

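/**
 * qd_get - look up (and optionally create) a cached quota data struct
 * @sdp: the filesystem
 * @user: non-zero for a user quota, zero for a group quota
 * @id: the UID or GID
 * @create: if set, allocate a new entry when none is found
 * @qdp: filled in with the resulting quota data (or NULL)
 *
 * Searches sd_quota_list under the spinlock.  Since qd_alloc() can sleep,
 * the allocation is done with the lock dropped and the list is searched
 * again before the new entry is inserted, so a racing lookup cannot add
 * a duplicate.
 */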
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
                  struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
        int error, found;

        *qdp = NULL;

        for (;;) {
                found = 0;
                spin_lock(&sdp->sd_quota_spin);
                list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                        if (qd->qd_id == id &&
                            !test_bit(QDF_USER, &qd->qd_flags) == !user) {
                                qd->qd_count++;
                                found = 1;
                                break;
                        }
                }

                if (!found)
                        qd = NULL;

                if (!qd && new_qd) {
                        qd = new_qd;
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        new_qd = NULL;
                }

                spin_unlock(&sdp->sd_quota_spin);

                if (qd || !create) {
                        if (new_qd) {
                                gfs2_lvb_unhold(new_qd->qd_gl);
                                kfree(new_qd);
                        }
                        *qdp = qd;
                        return 0;
                }

                error = qd_alloc(sdp, user, id, &new_qd);
                if (error)
                        return error;
        }
}

static void qd_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);
        qd->qd_count++;
        spin_unlock(&sdp->sd_quota_spin);
}

static void qd_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_count);
        if (!--qd->qd_count)
                qd->qd_last_touched = jiffies;
        spin_unlock(&sdp->sd_quota_spin);
}

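/**
 * slot_get - assign the quota data a slot in the per-node quota change file
 * @qd: the quota data
 *
 * Scans the slot bitmap (one bit per slot, one page per chunk) for the
 * first clear bit.  If the quota data already holds a slot, only the
 * slot reference count is bumped.
 */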
static int slot_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        unsigned int c, o = 0, b;
        unsigned char byte = 0;

        spin_lock(&sdp->sd_quota_spin);

        if (qd->qd_slot_count++) {
                spin_unlock(&sdp->sd_quota_spin);
                return 0;
        }

        for (c = 0; c < sdp->sd_quota_chunks; c++)
                for (o = 0; o < PAGE_SIZE; o++) {
                        byte = sdp->sd_quota_bitmap[c][o];
                        if (byte != 0xFF)
                                goto found;
                }

        goto fail;

found:
        for (b = 0; b < 8; b++)
                if (!(byte & (1 << b)))
                        break;
        qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

        if (qd->qd_slot >= sdp->sd_quota_slots)
                goto fail;

        sdp->sd_quota_bitmap[c][o] |= 1 << b;

        spin_unlock(&sdp->sd_quota_spin);

        return 0;

fail:
        qd->qd_slot_count--;
        spin_unlock(&sdp->sd_quota_spin);
        return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;
        spin_unlock(&sdp->sd_quota_spin);
}

static void slot_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        spin_lock(&sdp->sd_quota_spin);
        gfs2_assert(sdp, qd->qd_slot_count);
        if (!--qd->qd_slot_count) {
                gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
                qd->qd_slot = -1;
        }
        spin_unlock(&sdp->sd_quota_spin);
}

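/**
 * bh_get - read in the block holding this quota data's change entry
 * @qd: the quota data
 *
 * Maps the slot number to a block and offset within the quota change
 * file, reads that block, and points qd_bh_qc at this ID's on-disk
 * gfs2_quota_change structure.
 */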
static int bh_get(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int block, offset;
        struct buffer_head *bh;
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        mutex_lock(&sdp->sd_quota_mutex);

        if (qd->qd_bh_count++) {
                mutex_unlock(&sdp->sd_quota_mutex);
                return 0;
        }

        block = qd->qd_slot / sdp->sd_qc_per_block;
        offset = qd->qd_slot % sdp->sd_qc_per_block;

        bh_map.b_size = 1 << ip->i_inode.i_blkbits;
        error = gfs2_block_map(&ip->i_inode, block, 0, &bh_map);
        if (error)
                goto fail;
        error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
        if (error)
                goto fail;
        error = -EIO;
        if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
                goto fail_brelse;

        qd->qd_bh = bh;
        qd->qd_bh_qc = (struct gfs2_quota_change *)
                (bh->b_data + sizeof(struct gfs2_meta_header) +
                 offset * sizeof(struct gfs2_quota_change));

        mutex_unlock(&sdp->sd_quota_mutex);

        return 0;

fail_brelse:
        brelse(bh);
fail:
        qd->qd_bh_count--;
        mutex_unlock(&sdp->sd_quota_mutex);
        return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_assert(sdp, qd->qd_bh_count);
        if (!--qd->qd_bh_count) {
                brelse(qd->qd_bh);
                qd->qd_bh = NULL;
                qd->qd_bh_qc = NULL;
        }
        mutex_unlock(&sdp->sd_quota_mutex);
}

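/**
 * qd_fish - find the next quota data that needs syncing
 * @sdp: the filesystem
 * @qdp: filled in with a locked quota data, or NULL if none is pending
 *
 * Picks the first entry on sd_quota_list that has an outstanding change,
 * is not already being synced, and has not yet been synced in the current
 * sync generation, then marks it QDF_LOCKED and takes the references
 * needed for do_sync().
 */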
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
        struct gfs2_quota_data *qd = NULL;
        int error;
        int found = 0;

        *qdp = NULL;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&sdp->sd_quota_spin);

        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
                if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
                    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
                    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
                        continue;

                list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

                set_bit(QDF_LOCKED, &qd->qd_flags);
                gfs2_assert_warn(sdp, qd->qd_count);
                qd->qd_count++;
                qd->qd_change_sync = qd->qd_change;
                gfs2_assert_warn(sdp, qd->qd_slot_count);
                qd->qd_slot_count++;
                found = 1;

                break;
        }

        if (!found)
                qd = NULL;

        spin_unlock(&sdp->sd_quota_spin);

        if (qd) {
                gfs2_assert_warn(sdp, qd->qd_change_sync);
                error = bh_get(qd);
                if (error) {
                        clear_bit(QDF_LOCKED, &qd->qd_flags);
                        slot_put(qd);
                        qd_put(qd);
                        return error;
                }
        }

        *qdp = qd;

        return 0;
}

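/**
 * qd_trylock - try to lock a specific quota data for syncing
 * @qd: the quota data
 *
 * Like qd_fish(), but for one known entry; used when quota locks are
 * released.  Returns 1 if the entry was locked and should be synced.
 */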
static int qd_trylock(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        if (sdp->sd_vfs->s_flags & MS_RDONLY)
                return 0;

        spin_lock(&sdp->sd_quota_spin);

        if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
            !test_bit(QDF_CHANGE, &qd->qd_flags)) {
                spin_unlock(&sdp->sd_quota_spin);
                return 0;
        }

        list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

        set_bit(QDF_LOCKED, &qd->qd_flags);
        gfs2_assert_warn(sdp, qd->qd_count);
        qd->qd_count++;
        qd->qd_change_sync = qd->qd_change;
        gfs2_assert_warn(sdp, qd->qd_slot_count);
        qd->qd_slot_count++;

        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, qd->qd_change_sync);
        if (bh_get(qd)) {
                clear_bit(QDF_LOCKED, &qd->qd_flags);
                slot_put(qd);
                qd_put(qd);
                return 0;
        }

        return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
                         test_bit(QDF_LOCKED, &qd->qd_flags));
        clear_bit(QDF_LOCKED, &qd->qd_flags);
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

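/**
 * qdsb_get - get a quota data with slot and buffer references held
 * @sdp: the filesystem
 * @user: non-zero for a user quota, zero for a group quota
 * @id: the UID or GID
 * @create: passed through to qd_get()
 * @qdp: filled in with the resulting quota data
 *
 * Bundles qd_get(), slot_get() and bh_get(); undone by qdsb_put().
 */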
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
                    struct gfs2_quota_data **qdp)
{
        int error;

        error = qd_get(sdp, user, id, create, qdp);
        if (error)
                return error;

        error = slot_get(*qdp);
        if (error)
                goto fail;

        error = bh_get(*qdp);
        if (error)
                goto fail_slot;

        return 0;

fail_slot:
        slot_put(*qdp);
fail:
        qd_put(*qdp);
        return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
        bh_put(qd);
        slot_put(qd);
        qd_put(qd);
}

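/**
 * gfs2_quota_hold - acquire the quota data structs needed for a change
 * @ip: the inode whose blocks are being allocated or freed
 * @uid: a UID whose quota is also affected (or NO_QUOTA_CHANGE)
 * @gid: a GID whose quota is also affected (or NO_QUOTA_CHANGE)
 *
 * Holds up to four quota data structs in al_qd: the inode's owner and
 * group, plus a second UID/GID pair when ownership is being changed.
 */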
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data **qd = al->al_qd;
        int error;

        if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
            gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
                return -EIO;

        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;

        error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
        if (error)
                goto out;
        al->al_qd_num++;
        qd++;

        if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
                error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

        if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
                error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
                if (error)
                        goto out;
                al->al_qd_num++;
                qd++;
        }

out:
        if (error)
                gfs2_quota_unhold(ip);
        return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = &ip->i_alloc;
        unsigned int x;

        gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

        for (x = 0; x < al->al_qd_num; x++) {
                qdsb_put(al->al_qd[x]);
                al->al_qd[x] = NULL;
        }
        al->al_qd_num = 0;
}

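/**
 * sort_qd - comparison function for sorting quota data pointers
 *
 * Orders user quotas before group quotas, then sorts by ID, giving all
 * nodes a stable order in which to acquire quota glocks.
 */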
static int sort_qd(const void *a, const void *b)
{
        const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
        const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

        if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
            !test_bit(QDF_USER, &qd_b->qd_flags)) {
                if (test_bit(QDF_USER, &qd_a->qd_flags))
                        return -1;
                else
                        return 1;
        }
        if (qd_a->qd_id < qd_b->qd_id)
                return -1;
        if (qd_a->qd_id > qd_b->qd_id)
                return 1;

        return 0;
}

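/**
 * do_qc - add a change to an ID's entry in the local quota change file
 * @qd: the quota data
 * @change: the number of blocks allocated (positive) or freed (negative)
 *
 * Initializes the on-disk tag on the first change.  When the running
 * total returns to zero, the tag is cleared and the references taken
 * when it was set are dropped.
 */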
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        struct gfs2_quota_change *qc = qd->qd_bh_qc;
        s64 x;

        mutex_lock(&sdp->sd_quota_mutex);
        gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

        if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
                qc->qc_change = 0;
                qc->qc_flags = 0;
                if (test_bit(QDF_USER, &qd->qd_flags))
                        qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
                qc->qc_id = cpu_to_be32(qd->qd_id);
        }

        x = be64_to_cpu(qc->qc_change) + change;
        qc->qc_change = cpu_to_be64(x);

        spin_lock(&sdp->sd_quota_spin);
        qd->qd_change = x;
        spin_unlock(&sdp->sd_quota_spin);

        if (!x) {
                gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
                clear_bit(QDF_CHANGE, &qd->qd_flags);
                qc->qc_flags = 0;
                qc->qc_id = 0;
                slot_put(qd);
                qd_put(qd);
        } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
                qd_hold(qd);
                slot_hold(qd);
        }

        mutex_unlock(&sdp->sd_quota_mutex);
}

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
        const struct gfs2_quota *str = buf;

        qu->qu_limit = be64_to_cpu(str->qu_limit);
        qu->qu_warn = be64_to_cpu(str->qu_warn);
        qu->qu_value = be64_to_cpu(str->qu_value);
}

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
        struct gfs2_quota *str = buf;

        str->qu_limit = cpu_to_be64(qu->qu_limit);
        str->qu_warn = cpu_to_be64(qu->qu_warn);
        str->qu_value = cpu_to_be64(qu->qu_value);
        memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}

/**
 * gfs2_adjust_quota - adjust an ID's value in the on-disk quota file
 * @ip: the quota file inode
 * @loc: byte offset of the entry to adjust
 * @change: the amount (in blocks) to add to the current value
 * @qd: the quota data for this ID
 *
 * This function was mostly borrowed from gfs2_block_truncate_page, which
 * was in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd)
{
        struct inode *inode = &ip->i_inode;
        struct address_space *mapping = inode->i_mapping;
        unsigned long index = loc >> PAGE_CACHE_SHIFT;
        unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize, iblock, pos;
        struct buffer_head *bh;
        struct page *page;
        void *kaddr;
        char *ptr;
        struct gfs2_quota_host qp;
        s64 value;
        int err = -EIO;

        page = grab_cache_page(mapping, index);
        if (!page)
                return -ENOMEM;

        blocksize = inode->i_sb->s_blocksize;
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);

        bh = page_buffers(page);
        pos = blocksize;
        while (offset >= pos) {
                bh = bh->b_this_page;
                iblock++;
                pos += blocksize;
        }

        if (!buffer_mapped(bh)) {
                gfs2_get_block(inode, iblock, bh, 1);
                if (!buffer_mapped(bh))
                        goto unlock;
        }

        if (PageUptodate(page))
                set_buffer_uptodate(bh);

        if (!buffer_uptodate(bh)) {
                ll_rw_block(READ_META, 1, &bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto unlock;
        }

        gfs2_trans_add_bh(ip->i_gl, bh, 0);

        kaddr = kmap_atomic(page, KM_USER0);
        ptr = kaddr + offset;
        gfs2_quota_in(&qp, ptr);
        qp.qu_value += change;
        value = qp.qu_value;
        gfs2_quota_out(&qp, ptr);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
        err = 0;
        qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
        qd->qd_qb.qb_value = cpu_to_be64(value);
        ((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic =
                cpu_to_be32(GFS2_MAGIC);
        ((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value =
                cpu_to_be64(value);
unlock:
        unlock_page(page);
        page_cache_release(page);
        return err;
}

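/**
 * do_sync - write a batch of local quota changes into the quota file
 * @num_qd: the number of quota data structs in @qda
 * @qda: the quota data to sync
 *
 * Takes the quota glocks in sorted order, reserves space for any entries
 * that need new blocks, applies each qd_change_sync to the quota file,
 * and backs the synced amount out of the local quota change file via
 * do_qc().
 */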
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
        struct gfs2_quota_data *qd;
        loff_t offset;
        unsigned int nalloc = 0;
        struct gfs2_alloc *al = NULL;
        int error;

        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                               &data_blocks, &ind_blocks);

        ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
        if (!ghs)
                return -ENOMEM;

        sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
        for (qx = 0; qx < num_qd; qx++) {
                error = gfs2_glock_nq_init(qda[qx]->qd_gl,
                                           LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &ghs[qx]);
                if (error)
                        goto out;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
        if (error)
                goto out;

        for (x = 0; x < num_qd; x++) {
                int alloc_required;

                offset = qd2offset(qda[x]);
                error = gfs2_write_alloc_required(ip, offset,
                                                  sizeof(struct gfs2_quota),
                                                  &alloc_required);
                if (error)
                        goto out_gunlock;
                if (alloc_required)
                        nalloc++;
        }

        if (nalloc) {
                al = gfs2_alloc_get(ip);

                al->al_requested = nalloc * (data_blocks + ind_blocks);

                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_alloc;

                error = gfs2_trans_begin(sdp,
                                         al->al_rgd->rd_length +
                                         num_qd * data_blocks +
                                         nalloc * ind_blocks +
                                         RES_DINODE + num_qd +
                                         RES_STATFS, 0);
                if (error)
                        goto out_ipres;
        } else {
                error = gfs2_trans_begin(sdp,
                                         num_qd * data_blocks +
                                         RES_DINODE + num_qd, 0);
                if (error)
                        goto out_gunlock;
        }

        for (x = 0; x < num_qd; x++) {
                qd = qda[x];
                offset = qd2offset(qd);
                error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
                if (error)
                        goto out_end_trans;

                do_qc(qd, -qd->qd_change_sync);
        }

        error = 0;

out_end_trans:
        gfs2_trans_end(sdp);
out_ipres:
        if (nalloc)
                gfs2_inplace_release(ip);
out_alloc:
        if (nalloc)
                gfs2_alloc_put(ip);
out_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
out:
        while (qx--)
                gfs2_glock_dq_uninit(&ghs[qx]);
        kfree(ghs);
        gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
        return error;
}

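/**
 * do_glock - acquire an ID's quota glock and make sure its LVB is valid
 * @qd: the quota data
 * @force_refresh: re-read the quota file entry even if the LVB looks valid
 * @q_gh: the holder for the acquired glock
 *
 * If the cached Lock Value Block has no valid magic (or a refresh is
 * forced), the glock is retaken exclusively, the entry is read from the
 * quota file, and the LVB is reinitialized from it.
 */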
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
                    struct gfs2_holder *q_gh)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
        struct gfs2_holder i_gh;
        struct gfs2_quota_host q;
        char buf[sizeof(struct gfs2_quota)];
        struct file_ra_state ra_state;
        int error;
        struct gfs2_quota_lvb *qlvb;

        file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
restart:
        error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
        if (error)
                return error;

        qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

        if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
                loff_t pos;
                gfs2_glock_dq_uninit(q_gh);
                error = gfs2_glock_nq_init(qd->qd_gl,
                                           LM_ST_EXCLUSIVE, GL_NOCACHE,
                                           q_gh);
                if (error)
                        return error;

                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
                if (error)
                        goto fail;

                memset(buf, 0, sizeof(struct gfs2_quota));
                pos = qd2offset(qd);
                error = gfs2_internal_read(ip, &ra_state, buf,
                                           &pos, sizeof(struct gfs2_quota));
                if (error < 0)
                        goto fail_gunlock;

                gfs2_glock_dq_uninit(&i_gh);

                gfs2_quota_in(&q, buf);
                qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
                qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
                qlvb->__pad = 0;
                qlvb->qb_limit = cpu_to_be64(q.qu_limit);
                qlvb->qb_warn = cpu_to_be64(q.qu_warn);
                qlvb->qb_value = cpu_to_be64(q.qu_value);
                qd->qd_qb = *qlvb;

                if (gfs2_glock_is_blocking(qd->qd_gl)) {
                        gfs2_glock_dq_uninit(q_gh);
                        force_refresh = 0;
                        goto restart;
                }
        }

        return 0;

fail_gunlock:
        gfs2_glock_dq_uninit(&i_gh);
fail:
        gfs2_glock_dq_uninit(q_gh);
        return error;
}

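/**
 * gfs2_quota_lock - lock the quota data structs held on an inode
 * @ip: the inode
 * @uid: a UID whose quota is also affected (or NO_QUOTA_CHANGE)
 * @gid: a GID whose quota is also affected (or NO_QUOTA_CHANGE)
 *
 * Holds the quota data structs and acquires their glocks in sorted
 * order.  The glocks are skipped when quotas are not being enforced or
 * the caller has CAP_SYS_RESOURCE.
 */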
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = &ip->i_alloc;
        unsigned int x;
        int error;

        error = gfs2_quota_hold(ip, uid, gid);
        if (error)
                return error;

        if (capable(CAP_SYS_RESOURCE) ||
            sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
             sort_qd, NULL);

        for (x = 0; x < al->al_qd_num; x++) {
                error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
                if (error)
                        break;
        }

        if (!error)
                set_bit(GIF_QD_LOCKED, &ip->i_flags);
        else {
                while (x--)
                        gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
                gfs2_quota_unhold(ip);
        }

        return error;
}

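/**
 * need_sync - decide whether a quota change is large enough to sync now
 * @qd: the quota data
 *
 * Implements the "quota_scale" heuristic described at the top of this
 * file: the local change is scaled by the number of journals (i.e. nodes)
 * times quota_scale, and a sync is requested if the projected value
 * would reach the limit.
 */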
static int need_sync(struct gfs2_quota_data *qd)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
        struct gfs2_tune *gt = &sdp->sd_tune;
        s64 value;
        unsigned int num, den;
        int do_sync = 1;

        if (!qd->qd_qb.qb_limit)
                return 0;

        spin_lock(&sdp->sd_quota_spin);
        value = qd->qd_change;
        spin_unlock(&sdp->sd_quota_spin);

        spin_lock(&gt->gt_spin);
        num = gt->gt_quota_scale_num;
        den = gt->gt_quota_scale_den;
        spin_unlock(&gt->gt_spin);

        if (value < 0)
                do_sync = 0;
        else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
                 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                do_sync = 0;
        else {
                value *= gfs2_jindex_size(sdp) * num;
                do_div(value, den);
                value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
                if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
                        do_sync = 0;
        }

        return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        unsigned int x;

        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                goto out;

        for (x = 0; x < al->al_qd_num; x++) {
                struct gfs2_quota_data *qd;
                int sync;

                qd = al->al_qd[x];
                sync = need_sync(qd);

                gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

                if (sync && qd_trylock(qd))
                        qda[count++] = qd;
        }

        if (count) {
                do_sync(count, qda);
                for (x = 0; x < count; x++)
                        qd_unlock(qda[x]);
        }

out:
        gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

        printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
               sdp->sd_fsname, type,
               (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
               qd->qd_id);

        return 0;
}

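/**
 * gfs2_quota_check - test whether the locked quota values are over limit
 * @ip: the inode
 * @uid: the UID to check
 * @gid: the GID to check
 *
 * Compares each relevant ID's cached value plus the local pending change
 * against its limit and warn level.  Returns -EDQUOT if the hard limit
 * is exceeded; crossing the warn level just logs a rate-limited message.
 */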
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;
        s64 value;
        unsigned int x;
        int error = 0;

        if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
                return 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
                        continue;

                value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
                spin_lock(&sdp->sd_quota_spin);
                value += qd->qd_change;
                spin_unlock(&sdp->sd_quota_spin);

                if (be64_to_cpu(qd->qd_qb.qb_limit) &&
                    (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
                        print_message(qd, "exceeded");
                        error = -EDQUOT;
                        break;
                } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
                           (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
                           time_after_eq(jiffies, qd->qd_last_warn +
                                         gfs2_tune_get(sdp,
                                                gt_quota_warn_period) * HZ)) {
                        error = print_message(qd, "warning");
                        qd->qd_last_warn = jiffies;
                }
        }

        return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
                       u32 uid, u32 gid)
{
        struct gfs2_alloc *al = &ip->i_alloc;
        struct gfs2_quota_data *qd;
        unsigned int x;
        unsigned int found = 0;

        if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
                return;
        if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
                return;

        for (x = 0; x < al->al_qd_num; x++) {
                qd = al->al_qd[x];

                if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
                    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
                        do_qc(qd, change);
                        found++;
                }
        }
}

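/**
 * gfs2_quota_sync - sync all pending quota changes to the quota file
 * @sdp: the filesystem
 *
 * Bumps the sync generation, then repeatedly fishes out batches of up to
 * "quota_simul_sync" dirty quota data structs and writes them out with
 * do_sync().
 */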
int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data **qda;
        unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;

        sdp->sd_quota_sync_gen++;

        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;

        do {
                num_qd = 0;

                for (;;) {
                        error = qd_fish(sdp, qda + num_qd);
                        if (error || !qda[num_qd])
                                break;
                        if (++num_qd == max_qd)
                                break;
                }

                if (num_qd) {
                        if (!error)
                                error = do_sync(num_qd, qda);
                        if (!error)
                                for (x = 0; x < num_qd; x++)
                                        qda[x]->qd_sync_gen =
                                                sdp->sd_quota_sync_gen;

                        for (x = 0; x < num_qd; x++)
                                qd_unlock(qda[x]);
                }
        } while (!error && num_qd == max_qd);

        kfree(qda);

        return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
        struct gfs2_quota_data *qd;
        struct gfs2_holder q_gh;
        int error;

        error = qd_get(sdp, user, id, CREATE, &qd);
        if (error)
                return error;

        error = do_glock(qd, FORCE, &q_gh);
        if (!error)
                gfs2_glock_dq_uninit(&q_gh);

        qd_put(qd);

        return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc,
                                 const void *buf)
{
        const struct gfs2_quota_change *str = buf;

        qc->qc_change = be64_to_cpu(str->qc_change);
        qc->qc_flags = be32_to_cpu(str->qc_flags);
        qc->qc_id = be32_to_cpu(str->qc_id);
}

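/**
 * gfs2_quota_init - recover pending changes from the quota change file
 * @sdp: the filesystem
 *
 * Called at mount time: sizes the slot bitmap from the quota change
 * file, then walks every on-disk gfs2_quota_change entry and recreates
 * an in-core quota data struct for each nonzero change so that it can
 * be synced later.
 */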
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
        unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
        unsigned int x, slot = 0;
        unsigned int found = 0;
        u64 dblock;
        u32 extlen = 0;
        int error;

        if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
            ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
                gfs2_consist_inode(ip);
                return -EIO;
        }
        sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
        sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

        error = -ENOMEM;

        sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
                                       sizeof(unsigned char *), GFP_KERNEL);
        if (!sdp->sd_quota_bitmap)
                return error;

        for (x = 0; x < sdp->sd_quota_chunks; x++) {
                sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!sdp->sd_quota_bitmap[x])
                        goto fail;
        }

        for (x = 0; x < blocks; x++) {
                struct buffer_head *bh;
                unsigned int y;

                if (!extlen) {
                        int new = 0;
                        error = gfs2_extent_map(&ip->i_inode, x, &new,
                                                &dblock, &extlen);
                        if (error)
                                goto fail;
                }
                error = -EIO;
                bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
                if (!bh)
                        goto fail;
                if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
                        brelse(bh);
                        goto fail;
                }

                for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
                     y++, slot++) {
                        struct gfs2_quota_change_host qc;
                        struct gfs2_quota_data *qd;

                        gfs2_quota_change_in(&qc, bh->b_data +
                                             sizeof(struct gfs2_meta_header) +
                                             y * sizeof(struct gfs2_quota_change));
                        if (!qc.qc_change)
                                continue;

                        error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
                                         qc.qc_id, &qd);
                        if (error) {
                                brelse(bh);
                                goto fail;
                        }

                        set_bit(QDF_CHANGE, &qd->qd_flags);
                        qd->qd_change = qc.qc_change;
                        qd->qd_slot = slot;
                        qd->qd_slot_count = 1;
                        qd->qd_last_touched = jiffies;

                        spin_lock(&sdp->sd_quota_spin);
                        gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
                        list_add(&qd->qd_list, &sdp->sd_quota_list);
                        atomic_inc(&sdp->sd_quota_count);
                        spin_unlock(&sdp->sd_quota_spin);

                        found++;
                }

                brelse(bh);
                dblock++;
                extlen--;
        }

        if (found)
                fs_info(sdp, "found %u quota changes\n", found);

        return 0;

fail:
        gfs2_quota_cleanup(sdp);
        return error;
}

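/**
 * gfs2_quota_scan - shrink the cache of unused quota data structs
 * @sdp: the filesystem
 *
 * Frees entries whose reference count has been zero for longer than
 * "quota_cache_secs".
 */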
void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
        struct gfs2_quota_data *qd, *safe;
        LIST_HEAD(dead);

        spin_lock(&sdp->sd_quota_spin);
        list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
                if (!qd->qd_count &&
                    time_after_eq(jiffies, qd->qd_last_touched +
                                gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
                        list_move(&qd->qd_list, &dead);
                        gfs2_assert_warn(sdp,
                                         atomic_read(&sdp->sd_quota_count) > 0);
                        atomic_dec(&sdp->sd_quota_count);
                }
        }
        spin_unlock(&sdp->sd_quota_spin);

        while (!list_empty(&dead)) {
                qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
                list_del(&qd->qd_list);

                gfs2_assert_warn(sdp, !qd->qd_change);
                gfs2_assert_warn(sdp, !qd->qd_slot_count);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);
                kfree(qd);
        }
}

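/**
 * gfs2_quota_cleanup - tear down all quota data at unmount time
 * @sdp: the filesystem
 *
 * Waits (by rescheduling) for any still-referenced entries to be
 * released, then frees every quota data struct and the slot bitmap.
 */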
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_quota_list;
        struct gfs2_quota_data *qd;
        unsigned int x;

        spin_lock(&sdp->sd_quota_spin);
        while (!list_empty(head)) {
                qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

                if (qd->qd_count > 1 ||
                    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
                        list_move(&qd->qd_list, head);
                        spin_unlock(&sdp->sd_quota_spin);
                        schedule();
                        spin_lock(&sdp->sd_quota_spin);
                        continue;
                }

                list_del(&qd->qd_list);
                atomic_dec(&sdp->sd_quota_count);
                spin_unlock(&sdp->sd_quota_spin);

                if (!qd->qd_count) {
                        gfs2_assert_warn(sdp, !qd->qd_change);
                        gfs2_assert_warn(sdp, !qd->qd_slot_count);
                } else
                        gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
                gfs2_assert_warn(sdp, !qd->qd_bh_count);

                gfs2_lvb_unhold(qd->qd_gl);
                kfree(qd);

                spin_lock(&sdp->sd_quota_spin);
        }
        spin_unlock(&sdp->sd_quota_spin);

        gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

        if (sdp->sd_quota_bitmap) {
                for (x = 0; x < sdp->sd_quota_chunks; x++)
                        kfree(sdp->sd_quota_bitmap[x]);
                kfree(sdp->sd_quota_bitmap);
        }
}