fs/gfs2/quota.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 /*
11  * Quota change tags are associated with each transaction that allocates or
12  * deallocates space.  Those changes are accumulated locally to each node (in a
13  * per-node file) and then are periodically synced to the quota file.  This
14  * avoids the bottleneck of constantly touching the quota file, but introduces
15  * fuzziness in the current usage value of IDs that are being used on different
16  * nodes in the cluster simultaneously.  So, it is possible for a user on
17  * multiple nodes to overrun their quota, but that overrun is controllable.
18  * Since quota tags are part of transactions, there is no need for a quota check
19  * program to be run on node crashes or anything like that.
20  *
21  * There are a couple of knobs that let the administrator manage the quota
22  * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
23  * sitting on one node before being synced to the quota file.  (The default is
24  * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
25  * of quota file syncs increases as the user moves closer to their limit.  The
26  * more frequent the syncs, the more accurate the quota enforcement, but that
27  * means that there is more contention between the nodes for the quota file.
28  * The default value is one.  This sets the maximum theoretical quota overrun
29  * (with infinitely many nodes of infinite bandwidth) to twice the user's limit.  (In
30  * practice, the maximum overrun you see should be much less.)  A "quota_scale"
31  * number greater than one makes quota syncs more frequent and reduces the
32  * maximum overrun.  Numbers less than one (but greater than zero) make quota
33  * syncs less frequent.
34  *
35  * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
36  * the quota file, so it does not have to be read constantly.
37  */
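/*
 * Sync-threshold illustration (illustrative numbers only; the authoritative
 * logic is in need_sync() below): a node pushes its locally accumulated
 * change for an ID to the quota file once
 *
 *	change * journals * quota_scale_num / quota_scale_den + synced value
 *		>= limit
 *
 * For example, with quota_scale = 1, four journals, a synced value of 900
 * blocks and a limit of 1200 blocks, a node holding a local change of 100
 * blocks will sync, since 100 * 4 + 900 = 1300 >= 1200.
 */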
38
39 #include <linux/sched.h>
40 #include <linux/slab.h>
41 #include <linux/spinlock.h>
42 #include <linux/completion.h>
43 #include <linux/buffer_head.h>
44 #include <linux/sort.h>
45 #include <linux/fs.h>
46 #include <linux/bio.h>
47 #include <linux/gfs2_ondisk.h>
48 #include <linux/lm_interface.h>
49
50 #include "gfs2.h"
51 #include "incore.h"
52 #include "bmap.h"
53 #include "glock.h"
54 #include "glops.h"
55 #include "log.h"
56 #include "meta_io.h"
57 #include "quota.h"
58 #include "rgrp.h"
59 #include "super.h"
60 #include "trans.h"
61 #include "inode.h"
62 #include "ops_address.h"
63 #include "util.h"
64
65 #define QUOTA_USER 1
66 #define QUOTA_GROUP 0
67
68 struct gfs2_quota_host {
69         u64 qu_limit;
70         u64 qu_warn;
71         s64 qu_value;
72         u32 qu_ll_next;
73 };
74
75 struct gfs2_quota_change_host {
76         u64 qc_change;
77         u32 qc_flags; /* GFS2_QCF_... */
78         u32 qc_id;
79 };
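/*
 * The two structures above are CPU-endian mirrors of the corresponding
 * big-endian on-disk structures (struct gfs2_quota and struct
 * gfs2_quota_change); gfs2_quota_in()/gfs2_quota_out() and
 * gfs2_quota_change_in() below do the conversion.
 */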
80
81 static u64 qd2offset(struct gfs2_quota_data *qd)
82 {
83         u64 offset;
84
85         offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
86         offset *= sizeof(struct gfs2_quota);
87
88         return offset;
89 }
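/*
 * The quota file holds one struct gfs2_quota per ID, with user and group
 * records interleaved: qd2offset() above places user ID n at byte offset
 * 2n * sizeof(struct gfs2_quota) and group ID n immediately after it.
 */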
90
91 static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
92                     struct gfs2_quota_data **qdp)
93 {
94         struct gfs2_quota_data *qd;
95         int error;
96
97         qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_NOFS);
98         if (!qd)
99                 return -ENOMEM;
100
101         qd->qd_count = 1;
102         qd->qd_id = id;
103         if (user)
104                 set_bit(QDF_USER, &qd->qd_flags);
105         qd->qd_slot = -1;
106
107         error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
108                               &gfs2_quota_glops, CREATE, &qd->qd_gl);
109         if (error)
110                 goto fail;
111
112         error = gfs2_lvb_hold(qd->qd_gl);
113         gfs2_glock_put(qd->qd_gl);
114         if (error)
115                 goto fail;
116
117         *qdp = qd;
118
119         return 0;
120
121 fail:
122         kfree(qd);
123         return error;
124 }
125
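/*
 * Look up (or, with CREATE, instantiate) the gfs2_quota_data for an ID.
 * The list is searched under sd_quota_spin; if nothing is found and a new
 * entry is needed, the lock is dropped, qd_alloc() is called and the search
 * retried, so that a racing insertion of the same ID wins and the spare
 * allocation is freed.
 */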
126 static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
127                   struct gfs2_quota_data **qdp)
128 {
129         struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
130         int error, found;
131
132         *qdp = NULL;
133
134         for (;;) {
135                 found = 0;
136                 spin_lock(&sdp->sd_quota_spin);
137                 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
138                         if (qd->qd_id == id &&
139                             !test_bit(QDF_USER, &qd->qd_flags) == !user) {
140                                 qd->qd_count++;
141                                 found = 1;
142                                 break;
143                         }
144                 }
145
146                 if (!found)
147                         qd = NULL;
148
149                 if (!qd && new_qd) {
150                         qd = new_qd;
151                         list_add(&qd->qd_list, &sdp->sd_quota_list);
152                         atomic_inc(&sdp->sd_quota_count);
153                         new_qd = NULL;
154                 }
155
156                 spin_unlock(&sdp->sd_quota_spin);
157
158                 if (qd || !create) {
159                         if (new_qd) {
160                                 gfs2_lvb_unhold(new_qd->qd_gl);
161                                 kfree(new_qd);
162                         }
163                         *qdp = qd;
164                         return 0;
165                 }
166
167                 error = qd_alloc(sdp, user, id, &new_qd);
168                 if (error)
169                         return error;
170         }
171 }
172
173 static void qd_hold(struct gfs2_quota_data *qd)
174 {
175         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
176
177         spin_lock(&sdp->sd_quota_spin);
178         gfs2_assert(sdp, qd->qd_count);
179         qd->qd_count++;
180         spin_unlock(&sdp->sd_quota_spin);
181 }
182
183 static void qd_put(struct gfs2_quota_data *qd)
184 {
185         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
186         spin_lock(&sdp->sd_quota_spin);
187         gfs2_assert(sdp, qd->qd_count);
188         if (!--qd->qd_count)
189                 qd->qd_last_touched = jiffies;
190         spin_unlock(&sdp->sd_quota_spin);
191 }
192
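/*
 * Slot management: a gfs2_quota_data with a pending change owns one slot in
 * the per-node quota-change file.  Free slots are tracked in
 * sd_quota_bitmap, an array of PAGE_SIZE chunks, each covering
 * 8 * PAGE_SIZE slots.
 */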
193 static int slot_get(struct gfs2_quota_data *qd)
194 {
195         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
196         unsigned int c, o = 0, b;
197         unsigned char byte = 0;
198
199         spin_lock(&sdp->sd_quota_spin);
200
201         if (qd->qd_slot_count++) {
202                 spin_unlock(&sdp->sd_quota_spin);
203                 return 0;
204         }
205
206         for (c = 0; c < sdp->sd_quota_chunks; c++)
207                 for (o = 0; o < PAGE_SIZE; o++) {
208                         byte = sdp->sd_quota_bitmap[c][o];
209                         if (byte != 0xFF)
210                                 goto found;
211                 }
212
213         goto fail;
214
215 found:
216         for (b = 0; b < 8; b++)
217                 if (!(byte & (1 << b)))
218                         break;
219         qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
220
221         if (qd->qd_slot >= sdp->sd_quota_slots)
222                 goto fail;
223
224         sdp->sd_quota_bitmap[c][o] |= 1 << b;
225
226         spin_unlock(&sdp->sd_quota_spin);
227
228         return 0;
229
230 fail:
231         qd->qd_slot_count--;
232         spin_unlock(&sdp->sd_quota_spin);
233         return -ENOSPC;
234 }
235
236 static void slot_hold(struct gfs2_quota_data *qd)
237 {
238         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
239
240         spin_lock(&sdp->sd_quota_spin);
241         gfs2_assert(sdp, qd->qd_slot_count);
242         qd->qd_slot_count++;
243         spin_unlock(&sdp->sd_quota_spin);
244 }
245
246 static void slot_put(struct gfs2_quota_data *qd)
247 {
248         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
249
250         spin_lock(&sdp->sd_quota_spin);
251         gfs2_assert(sdp, qd->qd_slot_count);
252         if (!--qd->qd_slot_count) {
253                 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
254                 qd->qd_slot = -1;
255         }
256         spin_unlock(&sdp->sd_quota_spin);
257 }
258
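/*
 * Map and read the quota-change file block that contains this qd's slot and
 * point qd_bh_qc at the struct gfs2_quota_change within it.  The hold is
 * counted, so concurrent users share a single buffer_head.
 */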
259 static int bh_get(struct gfs2_quota_data *qd)
260 {
261         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
262         struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
263         unsigned int block, offset;
264         struct buffer_head *bh;
265         int error;
266         struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
267
268         mutex_lock(&sdp->sd_quota_mutex);
269
270         if (qd->qd_bh_count++) {
271                 mutex_unlock(&sdp->sd_quota_mutex);
272                 return 0;
273         }
274
275         block = qd->qd_slot / sdp->sd_qc_per_block;
276         offset = qd->qd_slot % sdp->sd_qc_per_block;
277
278         bh_map.b_size = 1 << ip->i_inode.i_blkbits;
279         error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
280         if (error)
281                 goto fail;
282         error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
283         if (error)
284                 goto fail;
285         error = -EIO;
286         if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
287                 goto fail_brelse;
288
289         qd->qd_bh = bh;
290         qd->qd_bh_qc = (struct gfs2_quota_change *)
291                 (bh->b_data + sizeof(struct gfs2_meta_header) +
292                  offset * sizeof(struct gfs2_quota_change));
293
294         mutex_unlock(&sdp->sd_quota_mutex);
295
296         return 0;
297
298 fail_brelse:
299         brelse(bh);
300 fail:
301         qd->qd_bh_count--;
302         mutex_unlock(&sdp->sd_quota_mutex);
303         return error;
304 }
305
306 static void bh_put(struct gfs2_quota_data *qd)
307 {
308         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
309
310         mutex_lock(&sdp->sd_quota_mutex);
311         gfs2_assert(sdp, qd->qd_bh_count);
312         if (!--qd->qd_bh_count) {
313                 brelse(qd->qd_bh);
314                 qd->qd_bh = NULL;
315                 qd->qd_bh_qc = NULL;
316         }
317         mutex_unlock(&sdp->sd_quota_mutex);
318 }
319
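/*
 * Fish out the next entry with a pending change (QDF_CHANGE) that has not
 * yet been written back in the current sync generation.  It is marked
 * QDF_LOCKED and extra qd/slot/bh references are taken for do_sync();
 * qd_unlock() releases all of them afterwards.
 */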
320 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
321 {
322         struct gfs2_quota_data *qd = NULL;
323         int error;
324         int found = 0;
325
326         *qdp = NULL;
327
328         if (sdp->sd_vfs->s_flags & MS_RDONLY)
329                 return 0;
330
331         spin_lock(&sdp->sd_quota_spin);
332
333         list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
334                 if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
335                     !test_bit(QDF_CHANGE, &qd->qd_flags) ||
336                     qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
337                         continue;
338
339                 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
340
341                 set_bit(QDF_LOCKED, &qd->qd_flags);
342                 gfs2_assert_warn(sdp, qd->qd_count);
343                 qd->qd_count++;
344                 qd->qd_change_sync = qd->qd_change;
345                 gfs2_assert_warn(sdp, qd->qd_slot_count);
346                 qd->qd_slot_count++;
347                 found = 1;
348
349                 break;
350         }
351
352         if (!found)
353                 qd = NULL;
354
355         spin_unlock(&sdp->sd_quota_spin);
356
357         if (qd) {
358                 gfs2_assert_warn(sdp, qd->qd_change_sync);
359                 error = bh_get(qd);
360                 if (error) {
361                         clear_bit(QDF_LOCKED, &qd->qd_flags);
362                         slot_put(qd);
363                         qd_put(qd);
364                         return error;
365                 }
366         }
367
368         *qdp = qd;
369
370         return 0;
371 }
372
373 static int qd_trylock(struct gfs2_quota_data *qd)
374 {
375         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
376
377         if (sdp->sd_vfs->s_flags & MS_RDONLY)
378                 return 0;
379
380         spin_lock(&sdp->sd_quota_spin);
381
382         if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
383             !test_bit(QDF_CHANGE, &qd->qd_flags)) {
384                 spin_unlock(&sdp->sd_quota_spin);
385                 return 0;
386         }
387
388         list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
389
390         set_bit(QDF_LOCKED, &qd->qd_flags);
391         gfs2_assert_warn(sdp, qd->qd_count);
392         qd->qd_count++;
393         qd->qd_change_sync = qd->qd_change;
394         gfs2_assert_warn(sdp, qd->qd_slot_count);
395         qd->qd_slot_count++;
396
397         spin_unlock(&sdp->sd_quota_spin);
398
399         gfs2_assert_warn(sdp, qd->qd_change_sync);
400         if (bh_get(qd)) {
401                 clear_bit(QDF_LOCKED, &qd->qd_flags);
402                 slot_put(qd);
403                 qd_put(qd);
404                 return 0;
405         }
406
407         return 1;
408 }
409
410 static void qd_unlock(struct gfs2_quota_data *qd)
411 {
412         gfs2_assert_warn(qd->qd_gl->gl_sbd,
413                          test_bit(QDF_LOCKED, &qd->qd_flags));
414         clear_bit(QDF_LOCKED, &qd->qd_flags);
415         bh_put(qd);
416         slot_put(qd);
417         qd_put(qd);
418 }
419
420 static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
421                     struct gfs2_quota_data **qdp)
422 {
423         int error;
424
425         error = qd_get(sdp, user, id, create, qdp);
426         if (error)
427                 return error;
428
429         error = slot_get(*qdp);
430         if (error)
431                 goto fail;
432
433         error = bh_get(*qdp);
434         if (error)
435                 goto fail_slot;
436
437         return 0;
438
439 fail_slot:
440         slot_put(*qdp);
441 fail:
442         qd_put(*qdp);
443         return error;
444 }
445
446 static void qdsb_put(struct gfs2_quota_data *qd)
447 {
448         bh_put(qd);
449         slot_put(qd);
450         qd_put(qd);
451 }
452
453 int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
454 {
455         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
456         struct gfs2_alloc *al = ip->i_alloc;
457         struct gfs2_quota_data **qd = al->al_qd;
458         int error;
459
460         if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
461             gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
462                 return -EIO;
463
464         if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
465                 return 0;
466
467         error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
468         if (error)
469                 goto out;
470         al->al_qd_num++;
471         qd++;
472
473         error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
474         if (error)
475                 goto out;
476         al->al_qd_num++;
477         qd++;
478
479         if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
480                 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
481                 if (error)
482                         goto out;
483                 al->al_qd_num++;
484                 qd++;
485         }
486
487         if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
488                 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
489                 if (error)
490                         goto out;
491                 al->al_qd_num++;
492                 qd++;
493         }
494
495 out:
496         if (error)
497                 gfs2_quota_unhold(ip);
498         return error;
499 }
500
501 void gfs2_quota_unhold(struct gfs2_inode *ip)
502 {
503         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
504         struct gfs2_alloc *al = ip->i_alloc;
505         unsigned int x;
506
507         gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
508
509         for (x = 0; x < al->al_qd_num; x++) {
510                 qdsb_put(al->al_qd[x]);
511                 al->al_qd[x] = NULL;
512         }
513         al->al_qd_num = 0;
514 }
515
516 static int sort_qd(const void *a, const void *b)
517 {
518         const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
519         const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
520
521         if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
522             !test_bit(QDF_USER, &qd_b->qd_flags)) {
523                 if (test_bit(QDF_USER, &qd_a->qd_flags))
524                         return -1;
525                 else
526                         return 1;
527         }
528         if (qd_a->qd_id < qd_b->qd_id)
529                 return -1;
530         if (qd_a->qd_id > qd_b->qd_id)
531                 return 1;
532
533         return 0;
534 }
535
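/*
 * Record a local usage change for one ID in the per-node quota-change file.
 * The first nonzero change takes a qd reference and a slot hold (QDF_CHANGE
 * is set); once the accumulated change returns to zero they are dropped
 * again, so only IDs with outstanding changes stay pinned.
 */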
536 static void do_qc(struct gfs2_quota_data *qd, s64 change)
537 {
538         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
539         struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
540         struct gfs2_quota_change *qc = qd->qd_bh_qc;
541         s64 x;
542
543         mutex_lock(&sdp->sd_quota_mutex);
544         gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);
545
546         if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
547                 qc->qc_change = 0;
548                 qc->qc_flags = 0;
549                 if (test_bit(QDF_USER, &qd->qd_flags))
550                         qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
551                 qc->qc_id = cpu_to_be32(qd->qd_id);
552         }
553
554         x = be64_to_cpu(qc->qc_change) + change;
555         qc->qc_change = cpu_to_be64(x);
556
557         spin_lock(&sdp->sd_quota_spin);
558         qd->qd_change = x;
559         spin_unlock(&sdp->sd_quota_spin);
560
561         if (!x) {
562                 gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
563                 clear_bit(QDF_CHANGE, &qd->qd_flags);
564                 qc->qc_flags = 0;
565                 qc->qc_id = 0;
566                 slot_put(qd);
567                 qd_put(qd);
568         } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
569                 qd_hold(qd);
570                 slot_hold(qd);
571         }
572
573         mutex_unlock(&sdp->sd_quota_mutex);
574 }
575
576 static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
577 {
578         const struct gfs2_quota *str = buf;
579
580         qu->qu_limit = be64_to_cpu(str->qu_limit);
581         qu->qu_warn = be64_to_cpu(str->qu_warn);
582         qu->qu_value = be64_to_cpu(str->qu_value);
583         qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
584 }
585
586 static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
587 {
588         struct gfs2_quota *str = buf;
589
590         str->qu_limit = cpu_to_be64(qu->qu_limit);
591         str->qu_warn = cpu_to_be64(qu->qu_warn);
592         str->qu_value = cpu_to_be64(qu->qu_value);
593         str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
594         memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
595 }
596
597 /**
598  * gfs2_adjust_quota - adjust one ID's usage value in the quota file
599  *
600  * This function was mostly borrowed from gfs2_block_truncate_page which was
601  * in turn mostly borrowed from ext3
602  */
603 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
604                              s64 change, struct gfs2_quota_data *qd)
605 {
606         struct inode *inode = &ip->i_inode;
607         struct address_space *mapping = inode->i_mapping;
608         unsigned long index = loc >> PAGE_CACHE_SHIFT;
609         unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
610         unsigned blocksize, iblock, pos;
611         struct buffer_head *bh;
612         struct page *page;
613         void *kaddr;
614         char *ptr;
615         struct gfs2_quota_host qp;
616         s64 value;
617         int err = -EIO;
618
619         if (gfs2_is_stuffed(ip))
620                 gfs2_unstuff_dinode(ip, NULL);
621         
622         page = grab_cache_page(mapping, index);
623         if (!page)
624                 return -ENOMEM;
625
626         blocksize = inode->i_sb->s_blocksize;
627         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
628
629         if (!page_has_buffers(page))
630                 create_empty_buffers(page, blocksize, 0);
631
632         bh = page_buffers(page);
633         pos = blocksize;
634         while (offset >= pos) {
635                 bh = bh->b_this_page;
636                 iblock++;
637                 pos += blocksize;
638         }
639
640         if (!buffer_mapped(bh)) {
641                 gfs2_block_map(inode, iblock, bh, 1);
642                 if (!buffer_mapped(bh))
643                         goto unlock;
644         }
645
646         if (PageUptodate(page))
647                 set_buffer_uptodate(bh);
648
649         if (!buffer_uptodate(bh)) {
650                 ll_rw_block(READ_META, 1, &bh);
651                 wait_on_buffer(bh);
652                 if (!buffer_uptodate(bh))
653                         goto unlock;
654         }
655
656         gfs2_trans_add_bh(ip->i_gl, bh, 0);
657
658         kaddr = kmap_atomic(page, KM_USER0);
659         ptr = kaddr + offset;
660         gfs2_quota_in(&qp, ptr);
661         qp.qu_value += change;
662         value = qp.qu_value;
663         gfs2_quota_out(&qp, ptr);
664         flush_dcache_page(page);
665         kunmap_atomic(kaddr, KM_USER0);
666         err = 0;
667         qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
668         qd->qd_qb.qb_value = cpu_to_be64(value);
669         ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
670         ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
671 unlock:
672         unlock_page(page);
673         page_cache_release(page);
674         return err;
675 }
676
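/*
 * Write a batch of pending changes into the shared quota file.  The per-ID
 * quota glocks are taken exclusively in sort_qd() order (so every node
 * acquires them in the same sequence), along with the quota inode glock;
 * block reservations and a transaction are set up, and each change is
 * applied with gfs2_adjust_quota() and then cancelled locally via do_qc().
 */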
677 static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
678 {
679         struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
680         struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
681         unsigned int data_blocks, ind_blocks;
682         struct gfs2_holder *ghs, i_gh;
683         unsigned int qx, x;
684         struct gfs2_quota_data *qd;
685         loff_t offset;
686         unsigned int nalloc = 0, blocks;
687         struct gfs2_alloc *al = NULL;
688         int error;
689
690         gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
691                               &data_blocks, &ind_blocks);
692
693         ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
694         if (!ghs)
695                 return -ENOMEM;
696
697         sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
698         for (qx = 0; qx < num_qd; qx++) {
699                 error = gfs2_glock_nq_init(qda[qx]->qd_gl,
700                                            LM_ST_EXCLUSIVE,
701                                            GL_NOCACHE, &ghs[qx]);
702                 if (error)
703                         goto out;
704         }
705
706         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
707         if (error)
708                 goto out;
709
710         for (x = 0; x < num_qd; x++) {
711                 int alloc_required;
712
713                 offset = qd2offset(qda[x]);
714                 error = gfs2_write_alloc_required(ip, offset,
715                                                   sizeof(struct gfs2_quota),
716                                                   &alloc_required);
717                 if (error)
718                         goto out_gunlock;
719                 if (alloc_required)
720                         nalloc++;
721         }
722
723         al = gfs2_alloc_get(ip);
724         if (!al) {
725                 error = -ENOMEM;
726                 goto out_gunlock;
727         }
728         /* 
729          * 1 blk for unstuffing inode if stuffed. We add this extra
730          * block to the reservation unconditionally. If the inode
731          * doesn't need unstuffing, the block will be released to the 
732          * rgrp since it won't be allocated during the transaction
733          */
734         al->al_requested = 1;
735         /* +1 in the end for block requested above for unstuffing */
736         blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;
737
738         if (nalloc)
739                 al->al_requested += nalloc * (data_blocks + ind_blocks);                
740         error = gfs2_inplace_reserve(ip);
741         if (error)
742                 goto out_alloc;
743
744         if (nalloc)
745                 blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;
746
747         error = gfs2_trans_begin(sdp, blocks, 0);
748         if (error)
749                 goto out_ipres;
750
751         for (x = 0; x < num_qd; x++) {
752                 qd = qda[x];
753                 offset = qd2offset(qd);
754                 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
755                                           (struct gfs2_quota_data *)
756                                           qd);
757                 if (error)
758                         goto out_end_trans;
759
760                 do_qc(qd, -qd->qd_change_sync);
761         }
762
763         error = 0;
764
765 out_end_trans:
766         gfs2_trans_end(sdp);
767 out_ipres:
768         gfs2_inplace_release(ip);
769 out_alloc:
770         gfs2_alloc_put(ip);
771 out_gunlock:
772         gfs2_glock_dq_uninit(&i_gh);
773 out:
774         while (qx--)
775                 gfs2_glock_dq_uninit(&ghs[qx]);
776         kfree(ghs);
777         gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
778         return error;
779 }
780
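/*
 * Acquire the glock for one quota ID and make sure its Lock Value Block is
 * usable.  Normally a shared hold is enough; if the LVB magic is stale or a
 * refresh is forced, the glock is retaken exclusively and the struct
 * gfs2_quota is read from the quota file into the LVB.  If another node is
 * then waiting on the glock, the hold is dropped and the whole sequence
 * restarts in shared mode.
 */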
781 static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
782                     struct gfs2_holder *q_gh)
783 {
784         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
785         struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
786         struct gfs2_holder i_gh;
787         struct gfs2_quota_host q;
788         char buf[sizeof(struct gfs2_quota)];
789         int error;
790         struct gfs2_quota_lvb *qlvb;
791
792 restart:
793         error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
794         if (error)
795                 return error;
796
797         qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
798
799         if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
800                 loff_t pos;
801                 gfs2_glock_dq_uninit(q_gh);
802                 error = gfs2_glock_nq_init(qd->qd_gl,
803                                           LM_ST_EXCLUSIVE, GL_NOCACHE,
804                                           q_gh);
805                 if (error)
806                         return error;
807
808                 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
809                 if (error)
810                         goto fail;
811
812                 memset(buf, 0, sizeof(struct gfs2_quota));
813                 pos = qd2offset(qd);
814                 error = gfs2_internal_read(ip, NULL, buf, &pos,
815                                            sizeof(struct gfs2_quota));
816                 if (error < 0)
817                         goto fail_gunlock;
818
819                 gfs2_glock_dq_uninit(&i_gh);
820
821
822                 gfs2_quota_in(&q, buf);
823                 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
824                 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
825                 qlvb->__pad = 0;
826                 qlvb->qb_limit = cpu_to_be64(q.qu_limit);
827                 qlvb->qb_warn = cpu_to_be64(q.qu_warn);
828                 qlvb->qb_value = cpu_to_be64(q.qu_value);
829                 qd->qd_qb = *qlvb;
830
831                 if (gfs2_glock_is_blocking(qd->qd_gl)) {
832                         gfs2_glock_dq_uninit(q_gh);
833                         force_refresh = 0;
834                         goto restart;
835                 }
836         }
837
838         return 0;
839
840 fail_gunlock:
841         gfs2_glock_dq_uninit(&i_gh);
842 fail:
843         gfs2_glock_dq_uninit(q_gh);
844         return error;
845 }
846
847 int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
848 {
849         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
850         struct gfs2_alloc *al = ip->i_alloc;
851         unsigned int x;
852         int error = 0;
853
854         gfs2_quota_hold(ip, uid, gid);
855
856         if (capable(CAP_SYS_RESOURCE) ||
857             sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
858                 return 0;
859
860         sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
861              sort_qd, NULL);
862
863         for (x = 0; x < al->al_qd_num; x++) {
864                 error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
865                 if (error)
866                         break;
867         }
868
869         if (!error)
870                 set_bit(GIF_QD_LOCKED, &ip->i_flags);
871         else {
872                 while (x--)
873                         gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
874                 gfs2_quota_unhold(ip);
875         }
876
877         return error;
878 }
879
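/*
 * Decide whether this ID's locally accumulated change is big enough to be
 * worth syncing now: the change is scaled by the number of journals and by
 * quota_scale, and a sync is requested once that projection would carry the
 * synced value past the limit (see the worked example at the top of this
 * file).
 */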
880 static int need_sync(struct gfs2_quota_data *qd)
881 {
882         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
883         struct gfs2_tune *gt = &sdp->sd_tune;
884         s64 value;
885         unsigned int num, den;
886         int do_sync = 1;
887
888         if (!qd->qd_qb.qb_limit)
889                 return 0;
890
891         spin_lock(&sdp->sd_quota_spin);
892         value = qd->qd_change;
893         spin_unlock(&sdp->sd_quota_spin);
894
895         spin_lock(&gt->gt_spin);
896         num = gt->gt_quota_scale_num;
897         den = gt->gt_quota_scale_den;
898         spin_unlock(&gt->gt_spin);
899
900         if (value < 0)
901                 do_sync = 0;
902         else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
903                  (s64)be64_to_cpu(qd->qd_qb.qb_limit))
904                 do_sync = 0;
905         else {
906                 value *= gfs2_jindex_size(sdp) * num;
907                 value = div_s64(value, den);
908                 value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
909                 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
910                         do_sync = 0;
911         }
912
913         return do_sync;
914 }
915
916 void gfs2_quota_unlock(struct gfs2_inode *ip)
917 {
918         struct gfs2_alloc *al = ip->i_alloc;
919         struct gfs2_quota_data *qda[4];
920         unsigned int count = 0;
921         unsigned int x;
922
923         if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
924                 goto out;
925
926         for (x = 0; x < al->al_qd_num; x++) {
927                 struct gfs2_quota_data *qd;
928                 int sync;
929
930                 qd = al->al_qd[x];
931                 sync = need_sync(qd);
932
933                 gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
934
935                 if (sync && qd_trylock(qd))
936                         qda[count++] = qd;
937         }
938
939         if (count) {
940                 do_sync(count, qda);
941                 for (x = 0; x < count; x++)
942                         qd_unlock(qda[x]);
943         }
944
945 out:
946         gfs2_quota_unhold(ip);
947 }
948
949 #define MAX_LINE 256
950
951 static int print_message(struct gfs2_quota_data *qd, char *type)
952 {
953         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
954
955         printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
956                sdp->sd_fsname, type,
957                (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
958                qd->qd_id);
959
960         return 0;
961 }
962
963 int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
964 {
965         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
966         struct gfs2_alloc *al = ip->i_alloc;
967         struct gfs2_quota_data *qd;
968         s64 value;
969         unsigned int x;
970         int error = 0;
971
972         if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
973                 return 0;
974
975         if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
976                 return 0;
977
978         for (x = 0; x < al->al_qd_num; x++) {
979                 qd = al->al_qd[x];
980
981                 if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
982                       (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
983                         continue;
984
985                 value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
986                 spin_lock(&sdp->sd_quota_spin);
987                 value += qd->qd_change;
988                 spin_unlock(&sdp->sd_quota_spin);
989
990                 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
991                         print_message(qd, "exceeded");
992                         error = -EDQUOT;
993                         break;
994                 } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
995                            (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
996                            time_after_eq(jiffies, qd->qd_last_warn +
997                                          gfs2_tune_get(sdp,
998                                                 gt_quota_warn_period) * HZ)) {
999                         error = print_message(qd, "warning");
1000                         qd->qd_last_warn = jiffies;
1001                 }
1002         }
1003
1004         return error;
1005 }
1006
1007 void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1008                        u32 uid, u32 gid)
1009 {
1010         struct gfs2_alloc *al = ip->i_alloc;
1011         struct gfs2_quota_data *qd;
1012         unsigned int x;
1013
1014         if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
1015                 return;
1016         if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
1017                 return;
1018
1019         for (x = 0; x < al->al_qd_num; x++) {
1020                 qd = al->al_qd[x];
1021
1022                 if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
1023                     (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
1024                         do_qc(qd, change);
1025                 }
1026         }
1027 }
1028
1029 int gfs2_quota_sync(struct gfs2_sbd *sdp)
1030 {
1031         struct gfs2_quota_data **qda;
1032         unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1033         unsigned int num_qd;
1034         unsigned int x;
1035         int error = 0;
1036
1037         sdp->sd_quota_sync_gen++;
1038
1039         qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1040         if (!qda)
1041                 return -ENOMEM;
1042
1043         do {
1044                 num_qd = 0;
1045
1046                 for (;;) {
1047                         error = qd_fish(sdp, qda + num_qd);
1048                         if (error || !qda[num_qd])
1049                                 break;
1050                         if (++num_qd == max_qd)
1051                                 break;
1052                 }
1053
1054                 if (num_qd) {
1055                         if (!error)
1056                                 error = do_sync(num_qd, qda);
1057                         if (!error)
1058                                 for (x = 0; x < num_qd; x++)
1059                                         qda[x]->qd_sync_gen =
1060                                                 sdp->sd_quota_sync_gen;
1061
1062                         for (x = 0; x < num_qd; x++)
1063                                 qd_unlock(qda[x]);
1064                 }
1065         } while (!error && num_qd == max_qd);
1066
1067         kfree(qda);
1068
1069         return error;
1070 }
1071
1072 int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1073 {
1074         struct gfs2_quota_data *qd;
1075         struct gfs2_holder q_gh;
1076         int error;
1077
1078         error = qd_get(sdp, user, id, CREATE, &qd);
1079         if (error)
1080                 return error;
1081
1082         error = do_glock(qd, FORCE, &q_gh);
1083         if (!error)
1084                 gfs2_glock_dq_uninit(&q_gh);
1085
1086         qd_put(qd);
1087
1088         return error;
1089 }
1090
1091 static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
1092 {
1093         const struct gfs2_quota_change *str = buf;
1094
1095         qc->qc_change = be64_to_cpu(str->qc_change);
1096         qc->qc_flags = be32_to_cpu(str->qc_flags);
1097         qc->qc_id = be32_to_cpu(str->qc_id);
1098 }
1099
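/*
 * Called at mount time: size the slot bitmap from the quota-change file and
 * replay that file, creating an in-core gfs2_quota_data (with QDF_CHANGE
 * set) for every slot still carrying a nonzero change from a previous
 * mount, so that those changes eventually get synced to the quota file.
 */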
1100 int gfs2_quota_init(struct gfs2_sbd *sdp)
1101 {
1102         struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1103         unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
1104         unsigned int x, slot = 0;
1105         unsigned int found = 0;
1106         u64 dblock;
1107         u32 extlen = 0;
1108         int error;
1109
1110         if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
1111             ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
1112                 gfs2_consist_inode(ip);
1113                 return -EIO;
1114         }
1115         sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1116         sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
1117
1118         error = -ENOMEM;
1119
1120         sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
1121                                        sizeof(unsigned char *), GFP_NOFS);
1122         if (!sdp->sd_quota_bitmap)
1123                 return error;
1124
1125         for (x = 0; x < sdp->sd_quota_chunks; x++) {
1126                 sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
1127                 if (!sdp->sd_quota_bitmap[x])
1128                         goto fail;
1129         }
1130
1131         for (x = 0; x < blocks; x++) {
1132                 struct buffer_head *bh;
1133                 unsigned int y;
1134
1135                 if (!extlen) {
1136                         int new = 0;
1137                         error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
1138                         if (error)
1139                                 goto fail;
1140                 }
1141                 error = -EIO;
1142                 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1143                 if (!bh)
1144                         goto fail;
1145                 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1146                         brelse(bh);
1147                         goto fail;
1148                 }
1149
1150                 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1151                      y++, slot++) {
1152                         struct gfs2_quota_change_host qc;
1153                         struct gfs2_quota_data *qd;
1154
1155                         gfs2_quota_change_in(&qc, bh->b_data +
1156                                           sizeof(struct gfs2_meta_header) +
1157                                           y * sizeof(struct gfs2_quota_change));
1158                         if (!qc.qc_change)
1159                                 continue;
1160
1161                         error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
1162                                          qc.qc_id, &qd);
1163                         if (error) {
1164                                 brelse(bh);
1165                                 goto fail;
1166                         }
1167
1168                         set_bit(QDF_CHANGE, &qd->qd_flags);
1169                         qd->qd_change = qc.qc_change;
1170                         qd->qd_slot = slot;
1171                         qd->qd_slot_count = 1;
1172                         qd->qd_last_touched = jiffies;
1173
1174                         spin_lock(&sdp->sd_quota_spin);
1175                         gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
1176                         list_add(&qd->qd_list, &sdp->sd_quota_list);
1177                         atomic_inc(&sdp->sd_quota_count);
1178                         spin_unlock(&sdp->sd_quota_spin);
1179
1180                         found++;
1181                 }
1182
1183                 brelse(bh);
1184                 dblock++;
1185                 extlen--;
1186         }
1187
1188         if (found)
1189                 fs_info(sdp, "found %u quota changes\n", found);
1190
1191         return 0;
1192
1193 fail:
1194         gfs2_quota_cleanup(sdp);
1195         return error;
1196 }
1197
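/*
 * Reap cached gfs2_quota_data entries that have been unreferenced for
 * longer than quota_cache_secs.  Candidates are moved to a private list
 * under sd_quota_spin and freed outside the lock.
 */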
1198 void gfs2_quota_scan(struct gfs2_sbd *sdp)
1199 {
1200         struct gfs2_quota_data *qd, *safe;
1201         LIST_HEAD(dead);
1202
1203         spin_lock(&sdp->sd_quota_spin);
1204         list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
1205                 if (!qd->qd_count &&
1206                     time_after_eq(jiffies, qd->qd_last_touched +
1207                                 gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
1208                         list_move(&qd->qd_list, &dead);
1209                         gfs2_assert_warn(sdp,
1210                                          atomic_read(&sdp->sd_quota_count) > 0);
1211                         atomic_dec(&sdp->sd_quota_count);
1212                 }
1213         }
1214         spin_unlock(&sdp->sd_quota_spin);
1215
1216         while (!list_empty(&dead)) {
1217                 qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
1218                 list_del(&qd->qd_list);
1219
1220                 gfs2_assert_warn(sdp, !qd->qd_change);
1221                 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1222                 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1223
1224                 gfs2_lvb_unhold(qd->qd_gl);
1225                 kfree(qd);
1226         }
1227 }
1228
1229 void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1230 {
1231         struct list_head *head = &sdp->sd_quota_list;
1232         struct gfs2_quota_data *qd;
1233         unsigned int x;
1234
1235         spin_lock(&sdp->sd_quota_spin);
1236         while (!list_empty(head)) {
1237                 qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1238
1239                 if (qd->qd_count > 1 ||
1240                     (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
1241                         list_move(&qd->qd_list, head);
1242                         spin_unlock(&sdp->sd_quota_spin);
1243                         schedule();
1244                         spin_lock(&sdp->sd_quota_spin);
1245                         continue;
1246                 }
1247
1248                 list_del(&qd->qd_list);
1249                 atomic_dec(&sdp->sd_quota_count);
1250                 spin_unlock(&sdp->sd_quota_spin);
1251
1252                 if (!qd->qd_count) {
1253                         gfs2_assert_warn(sdp, !qd->qd_change);
1254                         gfs2_assert_warn(sdp, !qd->qd_slot_count);
1255                 } else
1256                         gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
1257                 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1258
1259                 gfs2_lvb_unhold(qd->qd_gl);
1260                 kfree(qd);
1261
1262                 spin_lock(&sdp->sd_quota_spin);
1263         }
1264         spin_unlock(&sdp->sd_quota_spin);
1265
1266         gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1267
1268         if (sdp->sd_quota_bitmap) {
1269                 for (x = 0; x < sdp->sd_quota_chunks; x++)
1270                         kfree(sdp->sd_quota_bitmap[x]);
1271                 kfree(sdp->sd_quota_bitmap);
1272         }
1273 }
1274