fs/gfs2/quota.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 /*
11  * Quota change tags are associated with each transaction that allocates or
12  * deallocates space.  Those changes are accumulated locally to each node (in a
13  * per-node file) and then are periodically synced to the quota file.  This
14  * avoids the bottleneck of constantly touching the quota file, but introduces
15  * fuzziness in the current usage value of IDs that are being used on different
16  * nodes in the cluster simultaneously.  So, it is possible for a user on
17  * multiple nodes to overrun their quota, but that overrun is controllable.
18  * Since quota tags are part of transactions, there is no need for a quota check
19  * program to be run on node crashes or anything like that.
20  *
21  * There are a couple of knobs that let the administrator manage the quota
22  * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
23  * sitting on one node before being synced to the quota file.  (The default is
24  * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
25  * of quota file syncs increases as the user moves closer to their limit.  The
26  * more frequent the syncs, the more accurate the quota enforcement, but that
27  * means that there is more contention between the nodes for the quota file.
28  * The default value is one.  This sets the maximum theoretical quota overrun
29  * (with infinitely many nodes of infinite bandwidth) to twice the user's limit.  (In
30  * practice, the maximum overrun you see should be much less.)  A "quota_scale"
31  * number greater than one makes quota syncs more frequent and reduces the
32  * maximum overrun.  Numbers less than one (but greater than zero) make quota
33  * syncs less frequent.
34  *
35  * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
36  * the quota file, so that it does not have to be constantly read.
37  */
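/*
 * An illustrative sketch of the "quota_scale" behaviour described above
 * (the actual decision is made by need_sync() further down): with
 * quota_scale = num/den, N journals in the cluster and a pending local
 * change of C blocks, a sync is considered worthwhile once
 *
 *	cached_value + C * N * num / den >= limit
 *
 * i.e. the local change is assumed to be happening on every node at once,
 * which is what bounds the theoretical overrun at twice the limit for the
 * default scale of one.
 */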
38
39 #include <linux/sched.h>
40 #include <linux/slab.h>
41 #include <linux/spinlock.h>
42 #include <linux/completion.h>
43 #include <linux/buffer_head.h>
44 #include <linux/sort.h>
45 #include <linux/fs.h>
46 #include <linux/bio.h>
47 #include <linux/gfs2_ondisk.h>
48 #include <linux/lm_interface.h>
49 #include <linux/kthread.h>
50 #include <linux/freezer.h>
51
52 #include "gfs2.h"
53 #include "incore.h"
54 #include "bmap.h"
55 #include "glock.h"
56 #include "glops.h"
57 #include "log.h"
58 #include "meta_io.h"
59 #include "quota.h"
60 #include "rgrp.h"
61 #include "super.h"
62 #include "trans.h"
63 #include "inode.h"
64 #include "ops_address.h"
65 #include "util.h"
66
67 #define QUOTA_USER 1
68 #define QUOTA_GROUP 0
69
70 struct gfs2_quota_host {
71         u64 qu_limit;
72         u64 qu_warn;
73         s64 qu_value;
74         u32 qu_ll_next;
75 };
76
77 struct gfs2_quota_change_host {
78         u64 qc_change;
79         u32 qc_flags; /* GFS2_QCF_... */
80         u32 qc_id;
81 };
82
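/*
 * Layout of the global quota file: each ID owns two struct gfs2_quota
 * entries, the user quota at index 2 * id and the group quota at
 * 2 * id + 1.  qd2offset() turns a gfs2_quota_data into the byte offset
 * of its entry.
 */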
83 static u64 qd2offset(struct gfs2_quota_data *qd)
84 {
85         u64 offset;
86
87         offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
88         offset *= sizeof(struct gfs2_quota);
89
90         return offset;
91 }
92
93 static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
94                     struct gfs2_quota_data **qdp)
95 {
96         struct gfs2_quota_data *qd;
97         int error;
98
99         qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
100         if (!qd)
101                 return -ENOMEM;
102
103         qd->qd_count = 1;
104         qd->qd_id = id;
105         if (user)
106                 set_bit(QDF_USER, &qd->qd_flags);
107         qd->qd_slot = -1;
108
109         error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
110                               &gfs2_quota_glops, CREATE, &qd->qd_gl);
111         if (error)
112                 goto fail;
113
114         error = gfs2_lvb_hold(qd->qd_gl);
115         gfs2_glock_put(qd->qd_gl);
116         if (error)
117                 goto fail;
118
119         *qdp = qd;
120
121         return 0;
122
123 fail:
124         kmem_cache_free(gfs2_quotad_cachep, qd);
125         return error;
126 }
127
128 static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
129                   struct gfs2_quota_data **qdp)
130 {
131         struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
132         int error, found;
133
134         *qdp = NULL;
135
136         for (;;) {
137                 found = 0;
138                 spin_lock(&sdp->sd_quota_spin);
139                 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
140                         if (qd->qd_id == id &&
141                             !test_bit(QDF_USER, &qd->qd_flags) == !user) {
142                                 qd->qd_count++;
143                                 found = 1;
144                                 break;
145                         }
146                 }
147
148                 if (!found)
149                         qd = NULL;
150
151                 if (!qd && new_qd) {
152                         qd = new_qd;
153                         list_add(&qd->qd_list, &sdp->sd_quota_list);
154                         atomic_inc(&sdp->sd_quota_count);
155                         new_qd = NULL;
156                 }
157
158                 spin_unlock(&sdp->sd_quota_spin);
159
160                 if (qd || !create) {
161                         if (new_qd) {
162                                 gfs2_lvb_unhold(new_qd->qd_gl);
163                                 kmem_cache_free(gfs2_quotad_cachep, new_qd);
164                         }
165                         *qdp = qd;
166                         return 0;
167                 }
168
169                 error = qd_alloc(sdp, user, id, &new_qd);
170                 if (error)
171                         return error;
172         }
173 }
174
175 static void qd_hold(struct gfs2_quota_data *qd)
176 {
177         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
178
179         spin_lock(&sdp->sd_quota_spin);
180         gfs2_assert(sdp, qd->qd_count);
181         qd->qd_count++;
182         spin_unlock(&sdp->sd_quota_spin);
183 }
184
185 static void qd_put(struct gfs2_quota_data *qd)
186 {
187         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
188         spin_lock(&sdp->sd_quota_spin);
189         gfs2_assert(sdp, qd->qd_count);
190         if (!--qd->qd_count)
191                 qd->qd_last_touched = jiffies;
192         spin_unlock(&sdp->sd_quota_spin);
193 }
194
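/*
 * slot_get() assigns the quota_data a slot in the per-node quota_change
 * file by scanning sd_quota_bitmap (an array of PAGE_SIZE-sized chunks,
 * one bit per slot) for a clear bit; the slot number is
 * c * 8 * PAGE_SIZE + o * 8 + b.  -ENOSPC means every slot is in use.
 */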
195 static int slot_get(struct gfs2_quota_data *qd)
196 {
197         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
198         unsigned int c, o = 0, b;
199         unsigned char byte = 0;
200
201         spin_lock(&sdp->sd_quota_spin);
202
203         if (qd->qd_slot_count++) {
204                 spin_unlock(&sdp->sd_quota_spin);
205                 return 0;
206         }
207
208         for (c = 0; c < sdp->sd_quota_chunks; c++)
209                 for (o = 0; o < PAGE_SIZE; o++) {
210                         byte = sdp->sd_quota_bitmap[c][o];
211                         if (byte != 0xFF)
212                                 goto found;
213                 }
214
215         goto fail;
216
217 found:
218         for (b = 0; b < 8; b++)
219                 if (!(byte & (1 << b)))
220                         break;
221         qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
222
223         if (qd->qd_slot >= sdp->sd_quota_slots)
224                 goto fail;
225
226         sdp->sd_quota_bitmap[c][o] |= 1 << b;
227
228         spin_unlock(&sdp->sd_quota_spin);
229
230         return 0;
231
232 fail:
233         qd->qd_slot_count--;
234         spin_unlock(&sdp->sd_quota_spin);
235         return -ENOSPC;
236 }
237
238 static void slot_hold(struct gfs2_quota_data *qd)
239 {
240         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
241
242         spin_lock(&sdp->sd_quota_spin);
243         gfs2_assert(sdp, qd->qd_slot_count);
244         qd->qd_slot_count++;
245         spin_unlock(&sdp->sd_quota_spin);
246 }
247
248 static void slot_put(struct gfs2_quota_data *qd)
249 {
250         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
251
252         spin_lock(&sdp->sd_quota_spin);
253         gfs2_assert(sdp, qd->qd_slot_count);
254         if (!--qd->qd_slot_count) {
255                 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
256                 qd->qd_slot = -1;
257         }
258         spin_unlock(&sdp->sd_quota_spin);
259 }
260
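/*
 * bh_get() maps a quota_data's slot onto the per-node quota_change file
 * (sd_qc_inode): block = slot / sd_qc_per_block, entry = slot %
 * sd_qc_per_block.  It reads that block and points qd_bh_qc at the
 * corresponding struct gfs2_quota_change.
 */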
261 static int bh_get(struct gfs2_quota_data *qd)
262 {
263         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
264         struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
265         unsigned int block, offset;
266         struct buffer_head *bh;
267         int error;
268         struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
269
270         mutex_lock(&sdp->sd_quota_mutex);
271
272         if (qd->qd_bh_count++) {
273                 mutex_unlock(&sdp->sd_quota_mutex);
274                 return 0;
275         }
276
277         block = qd->qd_slot / sdp->sd_qc_per_block;
278         offset = qd->qd_slot % sdp->sd_qc_per_block;
279
280         bh_map.b_size = 1 << ip->i_inode.i_blkbits;
281         error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
282         if (error)
283                 goto fail;
284         error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
285         if (error)
286                 goto fail;
287         error = -EIO;
288         if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
289                 goto fail_brelse;
290
291         qd->qd_bh = bh;
292         qd->qd_bh_qc = (struct gfs2_quota_change *)
293                 (bh->b_data + sizeof(struct gfs2_meta_header) +
294                  offset * sizeof(struct gfs2_quota_change));
295
296         mutex_unlock(&sdp->sd_quota_mutex);
297
298         return 0;
299
300 fail_brelse:
301         brelse(bh);
302 fail:
303         qd->qd_bh_count--;
304         mutex_unlock(&sdp->sd_quota_mutex);
305         return error;
306 }
307
308 static void bh_put(struct gfs2_quota_data *qd)
309 {
310         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
311
312         mutex_lock(&sdp->sd_quota_mutex);
313         gfs2_assert(sdp, qd->qd_bh_count);
314         if (!--qd->qd_bh_count) {
315                 brelse(qd->qd_bh);
316                 qd->qd_bh = NULL;
317                 qd->qd_bh_qc = NULL;
318         }
319         mutex_unlock(&sdp->sd_quota_mutex);
320 }
321
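/*
 * qd_fish() picks the next quota_data that has pending local changes
 * (QDF_CHANGE set), is not already being synced (QDF_LOCKED clear) and
 * has not yet been synced in the current generation.  It takes references
 * on the qd, its slot and its change buffer so the caller can sync it.
 */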
322 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
323 {
324         struct gfs2_quota_data *qd = NULL;
325         int error;
326         int found = 0;
327
328         *qdp = NULL;
329
330         if (sdp->sd_vfs->s_flags & MS_RDONLY)
331                 return 0;
332
333         spin_lock(&sdp->sd_quota_spin);
334
335         list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
336                 if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
337                     !test_bit(QDF_CHANGE, &qd->qd_flags) ||
338                     qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
339                         continue;
340
341                 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
342
343                 set_bit(QDF_LOCKED, &qd->qd_flags);
344                 gfs2_assert_warn(sdp, qd->qd_count);
345                 qd->qd_count++;
346                 qd->qd_change_sync = qd->qd_change;
347                 gfs2_assert_warn(sdp, qd->qd_slot_count);
348                 qd->qd_slot_count++;
349                 found = 1;
350
351                 break;
352         }
353
354         if (!found)
355                 qd = NULL;
356
357         spin_unlock(&sdp->sd_quota_spin);
358
359         if (qd) {
360                 gfs2_assert_warn(sdp, qd->qd_change_sync);
361                 error = bh_get(qd);
362                 if (error) {
363                         clear_bit(QDF_LOCKED, &qd->qd_flags);
364                         slot_put(qd);
365                         qd_put(qd);
366                         return error;
367                 }
368         }
369
370         *qdp = qd;
371
372         return 0;
373 }
374
375 static int qd_trylock(struct gfs2_quota_data *qd)
376 {
377         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
378
379         if (sdp->sd_vfs->s_flags & MS_RDONLY)
380                 return 0;
381
382         spin_lock(&sdp->sd_quota_spin);
383
384         if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
385             !test_bit(QDF_CHANGE, &qd->qd_flags)) {
386                 spin_unlock(&sdp->sd_quota_spin);
387                 return 0;
388         }
389
390         list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
391
392         set_bit(QDF_LOCKED, &qd->qd_flags);
393         gfs2_assert_warn(sdp, qd->qd_count);
394         qd->qd_count++;
395         qd->qd_change_sync = qd->qd_change;
396         gfs2_assert_warn(sdp, qd->qd_slot_count);
397         qd->qd_slot_count++;
398
399         spin_unlock(&sdp->sd_quota_spin);
400
401         gfs2_assert_warn(sdp, qd->qd_change_sync);
402         if (bh_get(qd)) {
403                 clear_bit(QDF_LOCKED, &qd->qd_flags);
404                 slot_put(qd);
405                 qd_put(qd);
406                 return 0;
407         }
408
409         return 1;
410 }
411
412 static void qd_unlock(struct gfs2_quota_data *qd)
413 {
414         gfs2_assert_warn(qd->qd_gl->gl_sbd,
415                          test_bit(QDF_LOCKED, &qd->qd_flags));
416         clear_bit(QDF_LOCKED, &qd->qd_flags);
417         bh_put(qd);
418         slot_put(qd);
419         qd_put(qd);
420 }
421
422 static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
423                     struct gfs2_quota_data **qdp)
424 {
425         int error;
426
427         error = qd_get(sdp, user, id, create, qdp);
428         if (error)
429                 return error;
430
431         error = slot_get(*qdp);
432         if (error)
433                 goto fail;
434
435         error = bh_get(*qdp);
436         if (error)
437                 goto fail_slot;
438
439         return 0;
440
441 fail_slot:
442         slot_put(*qdp);
443 fail:
444         qd_put(*qdp);
445         return error;
446 }
447
448 static void qdsb_put(struct gfs2_quota_data *qd)
449 {
450         bh_put(qd);
451         slot_put(qd);
452         qd_put(qd);
453 }
454
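/**
 * gfs2_quota_hold - attach quota data to an allocation
 * @ip: the inode the allocation is being made against
 * @uid: a uid to change ownership to, or NO_QUOTA_CHANGE
 * @gid: a gid to change ownership to, or NO_QUOTA_CHANGE
 *
 * Attaches up to four gfs2_quota_data structures (the inode's owner and
 * group, plus the new owner and group when ownership is being changed) to
 * ip->i_alloc so that later quota checks and changes can find them.
 */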
455 int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
456 {
457         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
458         struct gfs2_alloc *al = ip->i_alloc;
459         struct gfs2_quota_data **qd = al->al_qd;
460         int error;
461
462         if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
463             gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
464                 return -EIO;
465
466         if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
467                 return 0;
468
469         error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
470         if (error)
471                 goto out;
472         al->al_qd_num++;
473         qd++;
474
475         error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
476         if (error)
477                 goto out;
478         al->al_qd_num++;
479         qd++;
480
481         if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
482                 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
483                 if (error)
484                         goto out;
485                 al->al_qd_num++;
486                 qd++;
487         }
488
489         if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
490                 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
491                 if (error)
492                         goto out;
493                 al->al_qd_num++;
494                 qd++;
495         }
496
497 out:
498         if (error)
499                 gfs2_quota_unhold(ip);
500         return error;
501 }
502
503 void gfs2_quota_unhold(struct gfs2_inode *ip)
504 {
505         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
506         struct gfs2_alloc *al = ip->i_alloc;
507         unsigned int x;
508
509         gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
510
511         for (x = 0; x < al->al_qd_num; x++) {
512                 qdsb_put(al->al_qd[x]);
513                 al->al_qd[x] = NULL;
514         }
515         al->al_qd_num = 0;
516 }
517
518 static int sort_qd(const void *a, const void *b)
519 {
520         const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
521         const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
522
523         if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
524             !test_bit(QDF_USER, &qd_b->qd_flags)) {
525                 if (test_bit(QDF_USER, &qd_a->qd_flags))
526                         return -1;
527                 else
528                         return 1;
529         }
530         if (qd_a->qd_id < qd_b->qd_id)
531                 return -1;
532         if (qd_a->qd_id > qd_b->qd_id)
533                 return 1;
534
535         return 0;
536 }
537
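/*
 * do_qc() folds a signed change into this quota_data's entry in the
 * per-node quota_change file (within the current transaction) and into
 * the in-core qd_change.  When the accumulated change first becomes
 * non-zero the qd and its slot are pinned via QDF_CHANGE; when it returns
 * to zero they are released again.
 */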
538 static void do_qc(struct gfs2_quota_data *qd, s64 change)
539 {
540         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
541         struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
542         struct gfs2_quota_change *qc = qd->qd_bh_qc;
543         s64 x;
544
545         mutex_lock(&sdp->sd_quota_mutex);
546         gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);
547
548         if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
549                 qc->qc_change = 0;
550                 qc->qc_flags = 0;
551                 if (test_bit(QDF_USER, &qd->qd_flags))
552                         qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
553                 qc->qc_id = cpu_to_be32(qd->qd_id);
554         }
555
556         x = be64_to_cpu(qc->qc_change) + change;
557         qc->qc_change = cpu_to_be64(x);
558
559         spin_lock(&sdp->sd_quota_spin);
560         qd->qd_change = x;
561         spin_unlock(&sdp->sd_quota_spin);
562
563         if (!x) {
564                 gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
565                 clear_bit(QDF_CHANGE, &qd->qd_flags);
566                 qc->qc_flags = 0;
567                 qc->qc_id = 0;
568                 slot_put(qd);
569                 qd_put(qd);
570         } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
571                 qd_hold(qd);
572                 slot_hold(qd);
573         }
574
575         mutex_unlock(&sdp->sd_quota_mutex);
576 }
577
578 static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
579 {
580         const struct gfs2_quota *str = buf;
581
582         qu->qu_limit = be64_to_cpu(str->qu_limit);
583         qu->qu_warn = be64_to_cpu(str->qu_warn);
584         qu->qu_value = be64_to_cpu(str->qu_value);
585         qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
586 }
587
588 static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
589 {
590         struct gfs2_quota *str = buf;
591
592         str->qu_limit = cpu_to_be64(qu->qu_limit);
593         str->qu_warn = cpu_to_be64(qu->qu_warn);
594         str->qu_value = cpu_to_be64(qu->qu_value);
595         str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
596         memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
597 }
598
599 /**
600  * gfs2_adjust_quota - adjust a quota entry in the on-disk quota file
601  *
602  * This function was mostly borrowed from gfs2_block_truncate_page, which was
603  * in turn mostly borrowed from ext3.
604  */
605 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
606                              s64 change, struct gfs2_quota_data *qd)
607 {
608         struct inode *inode = &ip->i_inode;
609         struct address_space *mapping = inode->i_mapping;
610         unsigned long index = loc >> PAGE_CACHE_SHIFT;
611         unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
612         unsigned blocksize, iblock, pos;
613         struct buffer_head *bh;
614         struct page *page;
615         void *kaddr;
616         char *ptr;
617         struct gfs2_quota_host qp;
618         s64 value;
619         int err = -EIO;
620
621         if (gfs2_is_stuffed(ip))
622                 gfs2_unstuff_dinode(ip, NULL);
623         
624         page = grab_cache_page(mapping, index);
625         if (!page)
626                 return -ENOMEM;
627
628         blocksize = inode->i_sb->s_blocksize;
629         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
630
631         if (!page_has_buffers(page))
632                 create_empty_buffers(page, blocksize, 0);
633
634         bh = page_buffers(page);
635         pos = blocksize;
636         while (offset >= pos) {
637                 bh = bh->b_this_page;
638                 iblock++;
639                 pos += blocksize;
640         }
641
642         if (!buffer_mapped(bh)) {
643                 gfs2_block_map(inode, iblock, bh, 1);
644                 if (!buffer_mapped(bh))
645                         goto unlock;
646         }
647
648         if (PageUptodate(page))
649                 set_buffer_uptodate(bh);
650
651         if (!buffer_uptodate(bh)) {
652                 ll_rw_block(READ_META, 1, &bh);
653                 wait_on_buffer(bh);
654                 if (!buffer_uptodate(bh))
655                         goto unlock;
656         }
657
658         gfs2_trans_add_bh(ip->i_gl, bh, 0);
659
660         kaddr = kmap_atomic(page, KM_USER0);
661         ptr = kaddr + offset;
662         gfs2_quota_in(&qp, ptr);
663         qp.qu_value += change;
664         value = qp.qu_value;
665         gfs2_quota_out(&qp, ptr);
666         flush_dcache_page(page);
667         kunmap_atomic(kaddr, KM_USER0);
668         err = 0;
669         qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
670         qd->qd_qb.qb_value = cpu_to_be64(value);
671         ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
672         ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
673 unlock:
674         unlock_page(page);
675         page_cache_release(page);
676         return err;
677 }
678
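/*
 * do_sync() writes the pending changes for an array of quota_data into
 * the global quota file in a single transaction: it takes each qd's glock
 * and the quota inode's glock exclusively, reserves enough blocks for any
 * entries that need allocating, adjusts each on-disk value with
 * gfs2_adjust_quota() and then subtracts the synced amount from the
 * per-node change file via do_qc().
 */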
679 static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
680 {
681         struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
682         struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
683         unsigned int data_blocks, ind_blocks;
684         struct gfs2_holder *ghs, i_gh;
685         unsigned int qx, x;
686         struct gfs2_quota_data *qd;
687         loff_t offset;
688         unsigned int nalloc = 0, blocks;
689         struct gfs2_alloc *al = NULL;
690         int error;
691
692         gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
693                               &data_blocks, &ind_blocks);
694
695         ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
696         if (!ghs)
697                 return -ENOMEM;
698
699         sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
700         for (qx = 0; qx < num_qd; qx++) {
701                 error = gfs2_glock_nq_init(qda[qx]->qd_gl,
702                                            LM_ST_EXCLUSIVE,
703                                            GL_NOCACHE, &ghs[qx]);
704                 if (error)
705                         goto out;
706         }
707
708         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
709         if (error)
710                 goto out;
711
712         for (x = 0; x < num_qd; x++) {
713                 int alloc_required;
714
715                 offset = qd2offset(qda[x]);
716                 error = gfs2_write_alloc_required(ip, offset,
717                                                   sizeof(struct gfs2_quota),
718                                                   &alloc_required);
719                 if (error)
720                         goto out_gunlock;
721                 if (alloc_required)
722                         nalloc++;
723         }
724
725         al = gfs2_alloc_get(ip);
726         if (!al) {
727                 error = -ENOMEM;
728                 goto out_gunlock;
729         }
730         /*
731          * 1 blk for unstuffing the inode if it is stuffed. We add this
732          * extra block to the reservation unconditionally. If the inode
733          * doesn't need unstuffing, the block will be released to the
734          * rgrp since it won't be allocated during the transaction.
735          */
736         al->al_requested = 1;
737         /* The +1 at the end is for the block requested above for unstuffing */
738         blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;
739
740         if (nalloc)
741                 al->al_requested += nalloc * (data_blocks + ind_blocks);
742         error = gfs2_inplace_reserve(ip);
743         if (error)
744                 goto out_alloc;
745
746         if (nalloc)
747                 blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;
748
749         error = gfs2_trans_begin(sdp, blocks, 0);
750         if (error)
751                 goto out_ipres;
752
753         for (x = 0; x < num_qd; x++) {
754                 qd = qda[x];
755                 offset = qd2offset(qd);
756                 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
757                                           (struct gfs2_quota_data *)
758                                           qd);
759                 if (error)
760                         goto out_end_trans;
761
762                 do_qc(qd, -qd->qd_change_sync);
763         }
764
765         error = 0;
766
767 out_end_trans:
768         gfs2_trans_end(sdp);
769 out_ipres:
770         gfs2_inplace_release(ip);
771 out_alloc:
772         gfs2_alloc_put(ip);
773 out_gunlock:
774         gfs2_glock_dq_uninit(&i_gh);
775 out:
776         while (qx--)
777                 gfs2_glock_dq_uninit(&ghs[qx]);
778         kfree(ghs);
779         gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
780         return error;
781 }
782
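/*
 * do_glock() acquires the quota_data's glock in the shared state and
 * makes sure the Lock Value Block holds a valid copy of the on-disk
 * quota.  If the LVB is stale (or @force_refresh is set), the glock is
 * retaken exclusively, the entry is re-read from the quota file and the
 * LVB is refilled; if another node is waiting on the glock, the lock is
 * dropped and the whole thing is retried under a shared hold.
 */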
783 static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
784                     struct gfs2_holder *q_gh)
785 {
786         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
787         struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
788         struct gfs2_holder i_gh;
789         struct gfs2_quota_host q;
790         char buf[sizeof(struct gfs2_quota)];
791         int error;
792         struct gfs2_quota_lvb *qlvb;
793
794 restart:
795         error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
796         if (error)
797                 return error;
798
799         qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
800
801         if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
802                 loff_t pos;
803                 gfs2_glock_dq_uninit(q_gh);
804                 error = gfs2_glock_nq_init(qd->qd_gl,
805                                           LM_ST_EXCLUSIVE, GL_NOCACHE,
806                                           q_gh);
807                 if (error)
808                         return error;
809
810                 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
811                 if (error)
812                         goto fail;
813
814                 memset(buf, 0, sizeof(struct gfs2_quota));
815                 pos = qd2offset(qd);
816                 error = gfs2_internal_read(ip, NULL, buf, &pos,
817                                            sizeof(struct gfs2_quota));
818                 if (error < 0)
819                         goto fail_gunlock;
820
821                 gfs2_glock_dq_uninit(&i_gh);
822
823
824                 gfs2_quota_in(&q, buf);
825                 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
826                 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
827                 qlvb->__pad = 0;
828                 qlvb->qb_limit = cpu_to_be64(q.qu_limit);
829                 qlvb->qb_warn = cpu_to_be64(q.qu_warn);
830                 qlvb->qb_value = cpu_to_be64(q.qu_value);
831                 qd->qd_qb = *qlvb;
832
833                 if (gfs2_glock_is_blocking(qd->qd_gl)) {
834                         gfs2_glock_dq_uninit(q_gh);
835                         force_refresh = 0;
836                         goto restart;
837                 }
838         }
839
840         return 0;
841
842 fail_gunlock:
843         gfs2_glock_dq_uninit(&i_gh);
844 fail:
845         gfs2_glock_dq_uninit(q_gh);
846         return error;
847 }
848
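/*
 * A rough sketch of how the interface below is normally used (details
 * vary between call sites; this is not lifted verbatim from any of them):
 *
 *	error = gfs2_quota_lock(ip, uid, gid);
 *	if (!error) {
 *		error = gfs2_quota_check(ip, uid, gid);
 *		if (!error) {
 *			... build the transaction, calling
 *			    gfs2_quota_change() for the blocks
 *			    actually allocated or freed ...
 *		}
 *		gfs2_quota_unlock(ip);
 *	}
 *
 * Pass NO_QUOTA_CHANGE for @uid/@gid when ownership is not being changed.
 */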
849 int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
850 {
851         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
852         struct gfs2_alloc *al = ip->i_alloc;
853         unsigned int x;
854         int error = 0;
855
856         gfs2_quota_hold(ip, uid, gid);
857
858         if (capable(CAP_SYS_RESOURCE) ||
859             sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
860                 return 0;
861
862         sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
863              sort_qd, NULL);
864
865         for (x = 0; x < al->al_qd_num; x++) {
866                 error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
867                 if (error)
868                         break;
869         }
870
871         if (!error)
872                 set_bit(GIF_QD_LOCKED, &ip->i_flags);
873         else {
874                 while (x--)
875                         gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
876                 gfs2_quota_unhold(ip);
877         }
878
879         return error;
880 }
881
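/*
 * need_sync() implements the "quota_scale" heuristic described at the top
 * of this file, using gt_quota_scale_num/gt_quota_scale_den and the number
 * of journals to decide whether the pending local change is worth syncing.
 */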
882 static int need_sync(struct gfs2_quota_data *qd)
883 {
884         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
885         struct gfs2_tune *gt = &sdp->sd_tune;
886         s64 value;
887         unsigned int num, den;
888         int do_sync = 1;
889
890         if (!qd->qd_qb.qb_limit)
891                 return 0;
892
893         spin_lock(&sdp->sd_quota_spin);
894         value = qd->qd_change;
895         spin_unlock(&sdp->sd_quota_spin);
896
897         spin_lock(&gt->gt_spin);
898         num = gt->gt_quota_scale_num;
899         den = gt->gt_quota_scale_den;
900         spin_unlock(&gt->gt_spin);
901
902         if (value < 0)
903                 do_sync = 0;
904         else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
905                  (s64)be64_to_cpu(qd->qd_qb.qb_limit))
906                 do_sync = 0;
907         else {
908                 value *= gfs2_jindex_size(sdp) * num;
909                 value = div_s64(value, den);
910                 value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
911                 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
912                         do_sync = 0;
913         }
914
915         return do_sync;
916 }
917
918 void gfs2_quota_unlock(struct gfs2_inode *ip)
919 {
920         struct gfs2_alloc *al = ip->i_alloc;
921         struct gfs2_quota_data *qda[4];
922         unsigned int count = 0;
923         unsigned int x;
924
925         if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
926                 goto out;
927
928         for (x = 0; x < al->al_qd_num; x++) {
929                 struct gfs2_quota_data *qd;
930                 int sync;
931
932                 qd = al->al_qd[x];
933                 sync = need_sync(qd);
934
935                 gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
936
937                 if (sync && qd_trylock(qd))
938                         qda[count++] = qd;
939         }
940
941         if (count) {
942                 do_sync(count, qda);
943                 for (x = 0; x < count; x++)
944                         qd_unlock(qda[x]);
945         }
946
947 out:
948         gfs2_quota_unhold(ip);
949 }
950
951 #define MAX_LINE 256
952
953 static int print_message(struct gfs2_quota_data *qd, char *type)
954 {
955         struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
956
957         printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
958                sdp->sd_fsname, type,
959                (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
960                qd->qd_id);
961
962         return 0;
963 }
964
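/**
 * gfs2_quota_check - check held quota data against warn and limit values
 * @ip: the inode whose allocation is being checked
 * @uid: the user ID to check
 * @gid: the group ID to check
 *
 * Adds the pending local change to the value cached in each matching LVB.
 * Returns -EDQUOT if a hard limit is exceeded; if only the warn value is
 * exceeded, a rate-limited message is logged and 0 is returned.
 */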
965 int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
966 {
967         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
968         struct gfs2_alloc *al = ip->i_alloc;
969         struct gfs2_quota_data *qd;
970         s64 value;
971         unsigned int x;
972         int error = 0;
973
974         if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
975                 return 0;
976
977         if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
978                 return 0;
979
980         for (x = 0; x < al->al_qd_num; x++) {
981                 qd = al->al_qd[x];
982
983                 if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
984                       (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
985                         continue;
986
987                 value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
988                 spin_lock(&sdp->sd_quota_spin);
989                 value += qd->qd_change;
990                 spin_unlock(&sdp->sd_quota_spin);
991
992                 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
993                         print_message(qd, "exceeded");
994                         error = -EDQUOT;
995                         break;
996                 } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
997                            (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
998                            time_after_eq(jiffies, qd->qd_last_warn +
999                                          gfs2_tune_get(sdp,
1000                                                 gt_quota_warn_period) * HZ)) {
1001                         error = print_message(qd, "warning");
1002                         qd->qd_last_warn = jiffies;
1003                 }
1004         }
1005
1006         return error;
1007 }
1008
1009 void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
1010                        u32 uid, u32 gid)
1011 {
1012         struct gfs2_alloc *al = ip->i_alloc;
1013         struct gfs2_quota_data *qd;
1014         unsigned int x;
1015
1016         if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
1017                 return;
1018         if (ip->i_diskflags & GFS2_DIF_SYSTEM)
1019                 return;
1020
1021         for (x = 0; x < al->al_qd_num; x++) {
1022                 qd = al->al_qd[x];
1023
1024                 if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
1025                     (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
1026                         do_qc(qd, change);
1027                 }
1028         }
1029 }
1030
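/*
 * gfs2_quota_sync() is the main sync entry point: it bumps the sync
 * generation, fishes out dirty quota_data in batches of up to
 * gt_quota_simul_sync, writes each batch with do_sync() and stamps the
 * synced entries with the new generation so they are not picked up again.
 */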
1031 int gfs2_quota_sync(struct gfs2_sbd *sdp)
1032 {
1033         struct gfs2_quota_data **qda;
1034         unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
1035         unsigned int num_qd;
1036         unsigned int x;
1037         int error = 0;
1038
1039         sdp->sd_quota_sync_gen++;
1040
1041         qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1042         if (!qda)
1043                 return -ENOMEM;
1044
1045         do {
1046                 num_qd = 0;
1047
1048                 for (;;) {
1049                         error = qd_fish(sdp, qda + num_qd);
1050                         if (error || !qda[num_qd])
1051                                 break;
1052                         if (++num_qd == max_qd)
1053                                 break;
1054                 }
1055
1056                 if (num_qd) {
1057                         if (!error)
1058                                 error = do_sync(num_qd, qda);
1059                         if (!error)
1060                                 for (x = 0; x < num_qd; x++)
1061                                         qda[x]->qd_sync_gen =
1062                                                 sdp->sd_quota_sync_gen;
1063
1064                         for (x = 0; x < num_qd; x++)
1065                                 qd_unlock(qda[x]);
1066                 }
1067         } while (!error && num_qd == max_qd);
1068
1069         kfree(qda);
1070
1071         return error;
1072 }
1073
1074 int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1075 {
1076         struct gfs2_quota_data *qd;
1077         struct gfs2_holder q_gh;
1078         int error;
1079
1080         error = qd_get(sdp, user, id, CREATE, &qd);
1081         if (error)
1082                 return error;
1083
1084         error = do_glock(qd, FORCE, &q_gh);
1085         if (!error)
1086                 gfs2_glock_dq_uninit(&q_gh);
1087
1088         qd_put(qd);
1089
1090         return error;
1091 }
1092
1093 static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
1094 {
1095         const struct gfs2_quota_change *str = buf;
1096
1097         qc->qc_change = be64_to_cpu(str->qc_change);
1098         qc->qc_flags = be32_to_cpu(str->qc_flags);
1099         qc->qc_id = be32_to_cpu(str->qc_id);
1100 }
1101
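/*
 * gfs2_quota_init() runs at mount time: it sizes the slot bitmap from the
 * per-node quota_change file and walks every entry in that file,
 * recreating an in-core gfs2_quota_data (with QDF_CHANGE set and its slot
 * marked busy) for each entry whose change is non-zero, so unsynced
 * changes from before the last unmount or crash are not lost.
 */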
1102 int gfs2_quota_init(struct gfs2_sbd *sdp)
1103 {
1104         struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1105         unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
1106         unsigned int x, slot = 0;
1107         unsigned int found = 0;
1108         u64 dblock;
1109         u32 extlen = 0;
1110         int error;
1111
1112         if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
1113             ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
1114                 gfs2_consist_inode(ip);
1115                 return -EIO;
1116         }
1117         sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1118         sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
1119
1120         error = -ENOMEM;
1121
1122         sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
1123                                        sizeof(unsigned char *), GFP_NOFS);
1124         if (!sdp->sd_quota_bitmap)
1125                 return error;
1126
1127         for (x = 0; x < sdp->sd_quota_chunks; x++) {
1128                 sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
1129                 if (!sdp->sd_quota_bitmap[x])
1130                         goto fail;
1131         }
1132
1133         for (x = 0; x < blocks; x++) {
1134                 struct buffer_head *bh;
1135                 unsigned int y;
1136
1137                 if (!extlen) {
1138                         int new = 0;
1139                         error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
1140                         if (error)
1141                                 goto fail;
1142                 }
1143                 error = -EIO;
1144                 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1145                 if (!bh)
1146                         goto fail;
1147                 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1148                         brelse(bh);
1149                         goto fail;
1150                 }
1151
1152                 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1153                      y++, slot++) {
1154                         struct gfs2_quota_change_host qc;
1155                         struct gfs2_quota_data *qd;
1156
1157                         gfs2_quota_change_in(&qc, bh->b_data +
1158                                           sizeof(struct gfs2_meta_header) +
1159                                           y * sizeof(struct gfs2_quota_change));
1160                         if (!qc.qc_change)
1161                                 continue;
1162
1163                         error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
1164                                          qc.qc_id, &qd);
1165                         if (error) {
1166                                 brelse(bh);
1167                                 goto fail;
1168                         }
1169
1170                         set_bit(QDF_CHANGE, &qd->qd_flags);
1171                         qd->qd_change = qc.qc_change;
1172                         qd->qd_slot = slot;
1173                         qd->qd_slot_count = 1;
1174                         qd->qd_last_touched = jiffies;
1175
1176                         spin_lock(&sdp->sd_quota_spin);
1177                         gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
1178                         list_add(&qd->qd_list, &sdp->sd_quota_list);
1179                         atomic_inc(&sdp->sd_quota_count);
1180                         spin_unlock(&sdp->sd_quota_spin);
1181
1182                         found++;
1183                 }
1184
1185                 brelse(bh);
1186                 dblock++;
1187                 extlen--;
1188         }
1189
1190         if (found)
1191                 fs_info(sdp, "found %u quota changes\n", found);
1192
1193         return 0;
1194
1195 fail:
1196         gfs2_quota_cleanup(sdp);
1197         return error;
1198 }
1199
1200 static void gfs2_quota_scan(struct gfs2_sbd *sdp)
1201 {
1202         struct gfs2_quota_data *qd, *safe;
1203         LIST_HEAD(dead);
1204
1205         spin_lock(&sdp->sd_quota_spin);
1206         list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
1207                 if (!qd->qd_count &&
1208                     time_after_eq(jiffies, qd->qd_last_touched +
1209                                 gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
1210                         list_move(&qd->qd_list, &dead);
1211                         gfs2_assert_warn(sdp,
1212                                          atomic_read(&sdp->sd_quota_count) > 0);
1213                         atomic_dec(&sdp->sd_quota_count);
1214                 }
1215         }
1216         spin_unlock(&sdp->sd_quota_spin);
1217
1218         while (!list_empty(&dead)) {
1219                 qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
1220                 list_del(&qd->qd_list);
1221
1222                 gfs2_assert_warn(sdp, !qd->qd_change);
1223                 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1224                 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1225
1226                 gfs2_lvb_unhold(qd->qd_gl);
1227                 kmem_cache_free(gfs2_quotad_cachep, qd);
1228         }
1229 }
1230
1231 void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1232 {
1233         struct list_head *head = &sdp->sd_quota_list;
1234         struct gfs2_quota_data *qd;
1235         unsigned int x;
1236
1237         spin_lock(&sdp->sd_quota_spin);
1238         while (!list_empty(head)) {
1239                 qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1240
1241                 if (qd->qd_count > 1 ||
1242                     (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
1243                         list_move(&qd->qd_list, head);
1244                         spin_unlock(&sdp->sd_quota_spin);
1245                         schedule();
1246                         spin_lock(&sdp->sd_quota_spin);
1247                         continue;
1248                 }
1249
1250                 list_del(&qd->qd_list);
1251                 atomic_dec(&sdp->sd_quota_count);
1252                 spin_unlock(&sdp->sd_quota_spin);
1253
1254                 if (!qd->qd_count) {
1255                         gfs2_assert_warn(sdp, !qd->qd_change);
1256                         gfs2_assert_warn(sdp, !qd->qd_slot_count);
1257                 } else
1258                         gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
1259                 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1260
1261                 gfs2_lvb_unhold(qd->qd_gl);
1262                 kmem_cache_free(gfs2_quotad_cachep, qd);
1263
1264                 spin_lock(&sdp->sd_quota_spin);
1265         }
1266         spin_unlock(&sdp->sd_quota_spin);
1267
1268         gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1269
1270         if (sdp->sd_quota_bitmap) {
1271                 for (x = 0; x < sdp->sd_quota_chunks; x++)
1272                         kfree(sdp->sd_quota_bitmap[x]);
1273                 kfree(sdp->sd_quota_bitmap);
1274         }
1275 }
1276
1277 static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
1278 {
1279         if (error == 0 || error == -EROFS)
1280                 return;
1281         if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
1282                 fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
1283 }
1284
1285 static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
1286                                int (*fxn)(struct gfs2_sbd *sdp),
1287                                unsigned long t, unsigned long *timeo,
1288                                unsigned int *new_timeo)
1289 {
1290         if (t >= *timeo) {
1291                 int error = fxn(sdp);
1292                 quotad_error(sdp, msg, error);
1293                 *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
1294         } else {
1295                 *timeo -= t;
1296         }
1297 }
1298
1299 static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
1300 {
1301         struct gfs2_inode *ip;
1302
1303         while(1) {
1304                 ip = NULL;
1305                 spin_lock(&sdp->sd_trunc_lock);
1306                 if (!list_empty(&sdp->sd_trunc_list)) {
1307                         ip = list_entry(sdp->sd_trunc_list.next,
1308                                         struct gfs2_inode, i_trunc_list);
1309                         list_del_init(&ip->i_trunc_list);
1310                 }
1311                 spin_unlock(&sdp->sd_trunc_lock);
1312                 if (ip == NULL)
1313                         return;
1314                 gfs2_glock_finish_truncate(ip);
1315         }
1316 }
1317
1318 /**
1319  * gfs2_quotad - Periodically sync quota and statfs changes and scan for stale quota data
1320  * @sdp: Pointer to GFS2 superblock
1321  *
1322  */
1323
1324 int gfs2_quotad(void *data)
1325 {
1326         struct gfs2_sbd *sdp = data;
1327         struct gfs2_tune *tune = &sdp->sd_tune;
1328         unsigned long statfs_timeo = 0;
1329         unsigned long quotad_timeo = 0;
1330         unsigned long t = 0;
1331         DEFINE_WAIT(wait);
1332         int empty;
1333
1334         while (!kthread_should_stop()) {
1335
1336                 /* Update the master statfs file */
1337                 quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
1338                                    &statfs_timeo, &tune->gt_statfs_quantum);
1339
1340                 /* Update quota file */
1341                 quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
1342                                    &quotad_timeo, &tune->gt_quota_quantum);
1343
1344                 /* FIXME: This should be turned into a shrinker */
1345                 gfs2_quota_scan(sdp);
1346
1347                 /* Check for & recover partially truncated inodes */
1348                 quotad_check_trunc_list(sdp);
1349
1350                 if (freezing(current))
1351                         refrigerator();
1352                 t = min(quotad_timeo, statfs_timeo);
1353
1354                 prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE);
1355                 spin_lock(&sdp->sd_trunc_lock);
1356                 empty = list_empty(&sdp->sd_trunc_list);
1357                 spin_unlock(&sdp->sd_trunc_lock);
1358                 if (empty)
1359                         t -= schedule_timeout(t);
1360                 else
1361                         t = 0;
1362                 finish_wait(&sdp->sd_quota_wait, &wait);
1363         }
1364
1365         return 0;
1366 }
1367