1 /*
2  * Implementation of the diskquota system for the LINUX operating system. QUOTA
3  * is implemented using the BSD system call interface as the means of
4  * communication with the user level. This file contains the generic routines
5  * called by the different filesystems on allocation of an inode or block.
6  * These routines take care of the administration needed to have a consistent
7  * diskquota tracking system. The ideas of both user and group quotas are based
8  * on the Melbourne quota system as used on BSD derived systems. The internal
9  * implementation is based on one of the several variants of the LINUX
10  * inode-subsystem with added complexity of the diskquota system.
11  * 
12  * Author:      Marco van Wieringen <mvw@planets.elm.net>
13  *
14  * Fixes:   Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
15  *
16  *              Revised list management to avoid races
17  *              -- Bill Hawes, <whawes@star.net>, 9/98
18  *
19  *              Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
20  *              As a consequence, the locking was moved from dquot_decr_...()
21  *              and dquot_incr_...() to the calling functions.
22  *              invalidate_dquots() now writes modified dquots.
23  *              Serialized quota_off() and quota_on() for mount point.
24  *              Fixed a few bugs in grow_dquots().
25  *              Fixed deadlock in write_dquot() - we no longer account quotas on
26  *              quota files
27  *              remove_dquot_ref() moved to inode.c - it now traverses through inodes
28  *              add_dquot_ref() restarts after blocking
29  *              Added check for bogus uid and fixed check for group in quotactl.
30  *              Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
31  *
32  *              Used struct list_head instead of own list struct
33  *              Invalidation of referenced dquots is no longer possible
34  *              Improved free_dquots list management
35  *              Quota and i_blocks are now updated in one place to avoid races
36  *              Warnings are now delayed so we won't block in critical section
37  *              Write updated not to require dquot lock
38  *              Jan Kara, <jack@suse.cz>, 9/2000
39  *
40  *              Added dynamic quota structure allocation
41  *              Jan Kara <jack@suse.cz> 12/2000
42  *
43  *              Rewritten quota interface. Implemented new quota format and
44  *              formats registering.
45  *              Jan Kara, <jack@suse.cz>, 2001,2002
46  *
47  *              New SMP locking.
48  *              Jan Kara, <jack@suse.cz>, 10/2002
49  *
50  *              Added journalled quota support, fixed lock inversion problems
51  *              Jan Kara, <jack@suse.cz>, 2003,2004
52  *
53  * (C) Copyright 1994 - 1997 Marco van Wieringen 
54  */
55
56 #include <linux/errno.h>
57 #include <linux/kernel.h>
58 #include <linux/fs.h>
59 #include <linux/mount.h>
60 #include <linux/mm.h>
61 #include <linux/time.h>
62 #include <linux/types.h>
63 #include <linux/string.h>
64 #include <linux/fcntl.h>
65 #include <linux/stat.h>
66 #include <linux/tty.h>
67 #include <linux/file.h>
68 #include <linux/slab.h>
69 #include <linux/sysctl.h>
70 #include <linux/init.h>
71 #include <linux/module.h>
72 #include <linux/proc_fs.h>
73 #include <linux/security.h>
74 #include <linux/kmod.h>
75 #include <linux/namei.h>
76 #include <linux/buffer_head.h>
77 #include <linux/capability.h>
78 #include <linux/quotaops.h>
79 #include <linux/writeback.h> /* for inode_lock, oddly enough.. */
80 #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
81 #include <net/netlink.h>
82 #include <net/genetlink.h>
83 #endif
84
85 #include <asm/uaccess.h>
86
87 #define __DQUOT_PARANOIA
88
89 /*
90  * There are two quota SMP locks. dq_list_lock protects all lists with quotas
91  * and quota formats and also dqstats structure containing statistics about the
92  * lists. dq_data_lock protects data in dq_dqb and mem_dqinfo structures and
93  * also guards the consistency of dquot->dq_dqb with inode->i_blocks and i_bytes.
94  * The i_blocks and i_bytes updates themselves are guarded by i_lock, acquired
95  * directly in inode_add_bytes() and inode_sub_bytes().
96  *
97  * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock
98  *
99  * Note that some things (e.g. the sb pointer, type and id) don't change during
100  * the life of the dquot structure and so need not be protected by a lock.
101  *
102  * Any operation working on dquots via inode pointers must hold dqptr_sem.  If
103  * the operation is just reading pointers from the inode (or not using them at
104  * all), the read lock is enough. If pointers are altered, the function must hold
105  * the write lock (these locking rules also apply to the S_NOQUOTA flag in the
106  * inode - note that for altering the flag i_mutex is also needed).  If an
107  * operation holds a reference to a dquot in some other way (e.g. quotactl ops),
108  * it must be guarded by dqonoff_mutex.
109  * This locking assures that:
110  *   a) update/access to dquot pointers in inode is serialized
111  *   b) everyone is guarded against invalidate_dquots()
112  *
113  * Each dquot has its dq_lock mutex. Locked dquots might not be referenced
114  * from inodes (dquot_alloc_space() and such don't check the dq_lock).
115  * Currently a dquot is locked only when it is being read into memory (or space
116  * for it is being allocated) on the first dqget() and when it is being released
117  * on the last dqput(). The allocation and release operations are serialized by
118  * the dq_lock and by checking the use count in dquot_release().  Write
119  * operations on dquots don't hold dq_lock as they copy data under dq_data_lock
120  * spinlock to internal buffers before writing.
121  *
122  * Lock ordering (including related VFS locks) is the following:
123  *   i_mutex > dqonoff_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
124  *   dqio_mutex
125  * i_mutex on quota files is special (it's below dqio_mutex)
126  */
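
/*
 * Illustrative sketch (not part of the original code) of the nesting implied
 * by the ordering above, as used for example by dquot_alloc_space() below:
 *
 *	down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 *	spin_lock(&dq_data_lock);
 *	...update dquot->dq_dqb and inode->i_blocks/i_bytes...
 *	spin_unlock(&dq_data_lock);
 *	...mark the dquots dirty (this may block when journalling)...
 *	up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
 */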
127
128 static DEFINE_SPINLOCK(dq_list_lock);
129 DEFINE_SPINLOCK(dq_data_lock);
130
131 static char *quotatypes[] = INITQFNAMES;
132 static struct quota_format_type *quota_formats; /* List of registered formats */
133 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
134
135 /* SLAB cache for dquot structures */
136 static struct kmem_cache *dquot_cachep;
137
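/* Add a quota format to the head of the list of registered formats */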
138 int register_quota_format(struct quota_format_type *fmt)
139 {
140         spin_lock(&dq_list_lock);
141         fmt->qf_next = quota_formats;
142         quota_formats = fmt;
143         spin_unlock(&dq_list_lock);
144         return 0;
145 }
146
147 void unregister_quota_format(struct quota_format_type *fmt)
148 {
149         struct quota_format_type **actqf;
150
151         spin_lock(&dq_list_lock);
152         for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
153         if (*actqf)
154                 *actqf = (*actqf)->qf_next;
155         spin_unlock(&dq_list_lock);
156 }
157
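/* Find a registered format by id, loading the format module on demand if needed */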
158 static struct quota_format_type *find_quota_format(int id)
159 {
160         struct quota_format_type *actqf;
161
162         spin_lock(&dq_list_lock);
163         for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
164         if (!actqf || !try_module_get(actqf->qf_owner)) {
165                 int qm;
166
167                 spin_unlock(&dq_list_lock);
168                 
169                 for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
170                 if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
171                         return NULL;
172
173                 spin_lock(&dq_list_lock);
174                 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
175                 if (actqf && !try_module_get(actqf->qf_owner))
176                         actqf = NULL;
177         }
178         spin_unlock(&dq_list_lock);
179         return actqf;
180 }
181
182 static void put_quota_format(struct quota_format_type *fmt)
183 {
184         module_put(fmt->qf_owner);
185 }
186
187 /*
188  * Dquot List Management:
189  * The quota code uses three lists for dquot management: the inuse_list,
190  * free_dquots, and dquot_hash[] array. A single dquot structure may be
191  * on all three lists, depending on its current state.
192  *
193  * All dquots are placed at the end of inuse_list when first created, and this
194  * list is used for invalidate operation, which must look at every dquot.
195  *
196  * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
197  * and this list is searched whenever we need an available dquot.  Dquots are
198  * removed from the list as soon as they are used again, and
199  * dqstats.free_dquots gives the number of dquots on the list. When
200  * dquot is invalidated it's completely released from memory.
201  *
202  * Dquots with a specific identity (device, type and id) are placed on
203  * one of the dquot_hash[] hash chains. The provides an efficient search
204  * mechanism to locate a specific dquot.
205  */
206  */
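
/*
 * Illustrative summary of the lifecycle implemented below (no new behaviour):
 * dqget() either finds the dquot in dquot_hash[] (removing it from free_dquots
 * if its dq_count was 0) or allocates a fresh one and puts it on inuse_list
 * and dquot_hash[]; the last dqput() appends it to free_dquots, from where
 * prune_dqcache() may eventually reclaim it.
 */
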
207 static LIST_HEAD(inuse_list);
208 static LIST_HEAD(free_dquots);
209 static unsigned int dq_hash_bits, dq_hash_mask;
210 static struct hlist_head *dquot_hash;
211
212 struct dqstats dqstats;
213
214 static void dqput(struct dquot *dquot);
215
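/* Hash a (superblock, id, type) triple into an index into the dquot_hash[] table */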
216 static inline unsigned int
217 hashfn(const struct super_block *sb, unsigned int id, int type)
218 {
219         unsigned long tmp;
220
221         tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
222         return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
223 }
224
225 /*
226  * Following list functions expect dq_list_lock to be held
227  */
228 static inline void insert_dquot_hash(struct dquot *dquot)
229 {
230         struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
231         hlist_add_head(&dquot->dq_hash, head);
232 }
233
234 static inline void remove_dquot_hash(struct dquot *dquot)
235 {
236         hlist_del_init(&dquot->dq_hash);
237 }
238
239 static inline struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, unsigned int id, int type)
240 {
241         struct hlist_node *node;
242         struct dquot *dquot;
243
244         hlist_for_each (node, dquot_hash+hashent) {
245                 dquot = hlist_entry(node, struct dquot, dq_hash);
246                 if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
247                         return dquot;
248         }
249         return NODQUOT;
250 }
251
252 /* Add a dquot to the tail of the free list */
253 static inline void put_dquot_last(struct dquot *dquot)
254 {
255         list_add_tail(&dquot->dq_free, &free_dquots);
256         dqstats.free_dquots++;
257 }
258
259 static inline void remove_free_dquot(struct dquot *dquot)
260 {
261         if (list_empty(&dquot->dq_free))
262                 return;
263         list_del_init(&dquot->dq_free);
264         dqstats.free_dquots--;
265 }
266
267 static inline void put_inuse(struct dquot *dquot)
268 {
269         /* We add to the back of the inuse list so we don't have to restart
270          * traversing the list if we block */
271         list_add_tail(&dquot->dq_inuse, &inuse_list);
272         dqstats.allocated_dquots++;
273 }
274
275 static inline void remove_inuse(struct dquot *dquot)
276 {
277         dqstats.allocated_dquots--;
278         list_del(&dquot->dq_inuse);
279 }
280 /*
281  * End of list functions needing dq_list_lock
282  */
283
284 static void wait_on_dquot(struct dquot *dquot)
285 {
286         mutex_lock(&dquot->dq_lock);
287         mutex_unlock(&dquot->dq_lock);
288 }
289
290 static inline int dquot_dirty(struct dquot *dquot)
291 {
292         return test_bit(DQ_MOD_B, &dquot->dq_flags);
293 }
294
295 static inline int mark_dquot_dirty(struct dquot *dquot)
296 {
297         return dquot->dq_sb->dq_op->mark_dirty(dquot);
298 }
299
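/* Mark dquot as modified and, if it was clean before, put it on the per-type dirty list */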
300 int dquot_mark_dquot_dirty(struct dquot *dquot)
301 {
302         spin_lock(&dq_list_lock);
303         if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags))
304                 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
305                                 info[dquot->dq_type].dqi_dirty_list);
306         spin_unlock(&dq_list_lock);
307         return 0;
308 }
309
310 /* This function needs dq_list_lock */
311 static inline int clear_dquot_dirty(struct dquot *dquot)
312 {
313         if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags))
314                 return 0;
315         list_del_init(&dquot->dq_dirty);
316         return 1;
317 }
318
319 void mark_info_dirty(struct super_block *sb, int type)
320 {
321         set_bit(DQF_INFO_DIRTY_B, &sb_dqopt(sb)->info[type].dqi_flags);
322 }
323 EXPORT_SYMBOL(mark_info_dirty);
324
325 /*
326  *      Read dquot from disk and alloc space for it
327  */
328
329 int dquot_acquire(struct dquot *dquot)
330 {
331         int ret = 0, ret2 = 0;
332         struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
333
334         mutex_lock(&dquot->dq_lock);
335         mutex_lock(&dqopt->dqio_mutex);
336         if (!test_bit(DQ_READ_B, &dquot->dq_flags))
337                 ret = dqopt->ops[dquot->dq_type]->read_dqblk(dquot);
338         if (ret < 0)
339                 goto out_iolock;
340         set_bit(DQ_READ_B, &dquot->dq_flags);
341         /* Instantiate dquot if needed */
342         if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
343                 ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
344                 /* Write the info if needed */
345                 if (info_dirty(&dqopt->info[dquot->dq_type]))
346                         ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
347                 if (ret < 0)
348                         goto out_iolock;
349                 if (ret2 < 0) {
350                         ret = ret2;
351                         goto out_iolock;
352                 }
353         }
354         set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
355 out_iolock:
356         mutex_unlock(&dqopt->dqio_mutex);
357         mutex_unlock(&dquot->dq_lock);
358         return ret;
359 }
360
361 /*
362  *      Write dquot to disk
363  */
364 int dquot_commit(struct dquot *dquot)
365 {
366         int ret = 0, ret2 = 0;
367         struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
368
369         mutex_lock(&dqopt->dqio_mutex);
370         spin_lock(&dq_list_lock);
371         if (!clear_dquot_dirty(dquot)) {
372                 spin_unlock(&dq_list_lock);
373                 goto out_sem;
374         }
375         spin_unlock(&dq_list_lock);
376         /* A dquot can be inactive only if there was an error during read/init,
377          * so we had better not write it */
378         if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
379                 ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
380                 if (info_dirty(&dqopt->info[dquot->dq_type]))
381                         ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
382                 if (ret >= 0)
383                         ret = ret2;
384         }
385 out_sem:
386         mutex_unlock(&dqopt->dqio_mutex);
387         return ret;
388 }
389
390 /*
391  *      Release dquot
392  */
393 int dquot_release(struct dquot *dquot)
394 {
395         int ret = 0, ret2 = 0;
396         struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
397
398         mutex_lock(&dquot->dq_lock);
399         /* Check whether we are not racing with some other dqget() */
400         if (atomic_read(&dquot->dq_count) > 1)
401                 goto out_dqlock;
402         mutex_lock(&dqopt->dqio_mutex);
403         if (dqopt->ops[dquot->dq_type]->release_dqblk) {
404                 ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
405                 /* Write the info */
406                 if (info_dirty(&dqopt->info[dquot->dq_type]))
407                         ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
408                 if (ret >= 0)
409                         ret = ret2;
410         }
411         clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
412         mutex_unlock(&dqopt->dqio_mutex);
413 out_dqlock:
414         mutex_unlock(&dquot->dq_lock);
415         return ret;
416 }
417
418 /* Invalidate all dquots on the list. Note that this function is called after
419  * quota is disabled and pointers from inodes removed so there cannot be new
420  * quota users. There can still be some users of quotas due to inodes being
421  * just deleted or pruned by prune_icache() (those are not attached to any
422  * list). We have to wait for such users.
423  */
424 static void invalidate_dquots(struct super_block *sb, int type)
425 {
426         struct dquot *dquot, *tmp;
427
428 restart:
429         spin_lock(&dq_list_lock);
430         list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
431                 if (dquot->dq_sb != sb)
432                         continue;
433                 if (dquot->dq_type != type)
434                         continue;
435                 /* Wait for dquot users */
436                 if (atomic_read(&dquot->dq_count)) {
437                         DEFINE_WAIT(wait);
438
439                         atomic_inc(&dquot->dq_count);
440                         prepare_to_wait(&dquot->dq_wait_unused, &wait,
441                                         TASK_UNINTERRUPTIBLE);
442                         spin_unlock(&dq_list_lock);
443                         /* Once dqput() wakes us up, we know it's time to free
444                          * the dquot.
445                          * IMPORTANT: we rely on the fact that there is always
446                          * at most one process waiting for the dquot to be freed.
447                          * Otherwise dq_count would be > 1 and we would never
448                          * wake up.
449                          */
450                         if (atomic_read(&dquot->dq_count) > 1)
451                                 schedule();
452                         finish_wait(&dquot->dq_wait_unused, &wait);
453                         dqput(dquot);
454                         /* At this moment the dquot need not exist (it could have
455                          * been reclaimed by prune_dqcache()). Hence we must
456                          * restart. */
457                         goto restart;
458                 }
459                 /*
460                  * Quota now has no users and it has been written on last
461                  * dqput()
462                  */
463                 remove_dquot_hash(dquot);
464                 remove_free_dquot(dquot);
465                 remove_inuse(dquot);
466                 kmem_cache_free(dquot_cachep, dquot);
467         }
468         spin_unlock(&dq_list_lock);
469 }
470
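/* Write all dirty dquots and dirty quota info of the given type (all types if type == -1) to disk */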
471 int vfs_quota_sync(struct super_block *sb, int type)
472 {
473         struct list_head *dirty;
474         struct dquot *dquot;
475         struct quota_info *dqopt = sb_dqopt(sb);
476         int cnt;
477
478         mutex_lock(&dqopt->dqonoff_mutex);
479         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
480                 if (type != -1 && cnt != type)
481                         continue;
482                 if (!sb_has_quota_enabled(sb, cnt))
483                         continue;
484                 spin_lock(&dq_list_lock);
485                 dirty = &dqopt->info[cnt].dqi_dirty_list;
486                 while (!list_empty(dirty)) {
487                         dquot = list_first_entry(dirty, struct dquot, dq_dirty);
488                         /* A dquot that is dirty but inactive can only be a bad dquot... */
489                         if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
490                                 clear_dquot_dirty(dquot);
491                                 continue;
492                         }
493                         /* Now we have an active dquot to which someone is
494                          * holding a reference, so we can safely just increase
495                          * the use count */
496                         atomic_inc(&dquot->dq_count);
497                         dqstats.lookups++;
498                         spin_unlock(&dq_list_lock);
499                         sb->dq_op->write_dquot(dquot);
500                         dqput(dquot);
501                         spin_lock(&dq_list_lock);
502                 }
503                 spin_unlock(&dq_list_lock);
504         }
505
506         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
507                 if ((cnt == type || type == -1) && sb_has_quota_enabled(sb, cnt)
508                         && info_dirty(&dqopt->info[cnt]))
509                         sb->dq_op->write_info(sb, cnt);
510         spin_lock(&dq_list_lock);
511         dqstats.syncs++;
512         spin_unlock(&dq_list_lock);
513         mutex_unlock(&dqopt->dqonoff_mutex);
514
515         return 0;
516 }
517
518 /* Free unused dquots from cache */
519 static void prune_dqcache(int count)
520 {
521         struct list_head *head;
522         struct dquot *dquot;
523
524         head = free_dquots.prev;
525         while (head != &free_dquots && count) {
526                 dquot = list_entry(head, struct dquot, dq_free);
527                 remove_dquot_hash(dquot);
528                 remove_free_dquot(dquot);
529                 remove_inuse(dquot);
530                 kmem_cache_free(dquot_cachep, dquot);
531                 count--;
532                 head = free_dquots.prev;
533         }
534 }
535
536 /*
537  * This is called from the memory shrinker (typically via kswapd) when we
538  * think we need some more memory
539  */
540
541 static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
542 {
543         if (nr) {
544                 spin_lock(&dq_list_lock);
545                 prune_dqcache(nr);
546                 spin_unlock(&dq_list_lock);
547         }
548         return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
549 }
550
551 static struct shrinker dqcache_shrinker = {
552         .shrink = shrink_dqcache_memory,
553         .seeks = DEFAULT_SEEKS,
554 };
555
556 /*
557  * Put reference to dquot
558  * NOTE: If you change this function please check whether dqput_blocks() works right...
559  * MUST be called with either dqptr_sem or dqonoff_mutex held
560  */
561 static void dqput(struct dquot *dquot)
562 {
563         int ret;
564
565         if (!dquot)
566                 return;
567 #ifdef __DQUOT_PARANOIA
568         if (!atomic_read(&dquot->dq_count)) {
569                 printk("VFS: dqput: trying to free free dquot\n");
570                 printk("VFS: device %s, dquot of %s %d\n",
571                         dquot->dq_sb->s_id,
572                         quotatypes[dquot->dq_type],
573                         dquot->dq_id);
574                 BUG();
575         }
576 #endif
577         
578         spin_lock(&dq_list_lock);
579         dqstats.drops++;
580         spin_unlock(&dq_list_lock);
581 we_slept:
582         spin_lock(&dq_list_lock);
583         if (atomic_read(&dquot->dq_count) > 1) {
584                 /* We have more than one user... nothing to do */
585                 atomic_dec(&dquot->dq_count);
586                 /* Releasing dquot during quotaoff phase? */
587                 if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) &&
588                     atomic_read(&dquot->dq_count) == 1)
589                         wake_up(&dquot->dq_wait_unused);
590                 spin_unlock(&dq_list_lock);
591                 return;
592         }
593         /* Need to release dquot? */
594         if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && dquot_dirty(dquot)) {
595                 spin_unlock(&dq_list_lock);
596                 /* Commit dquot before releasing */
597                 ret = dquot->dq_sb->dq_op->write_dquot(dquot);
598                 if (ret < 0) {
599                         printk(KERN_ERR "VFS: cannot write quota structure on "
600                                 "device %s (error %d). Quota may get out of "
601                                 "sync!\n", dquot->dq_sb->s_id, ret);
602                         /*
603                          * We clear dirty bit anyway, so that we avoid
604                          * infinite loop here
605                          */
606                         spin_lock(&dq_list_lock);
607                         clear_dquot_dirty(dquot);
608                         spin_unlock(&dq_list_lock);
609                 }
610                 goto we_slept;
611         }
612         /* Clear flag in case dquot was inactive (something bad happened) */
613         clear_dquot_dirty(dquot);
614         if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
615                 spin_unlock(&dq_list_lock);
616                 dquot->dq_sb->dq_op->release_dquot(dquot);
617                 goto we_slept;
618         }
619         atomic_dec(&dquot->dq_count);
620 #ifdef __DQUOT_PARANOIA
621         /* sanity check */
622         BUG_ON(!list_empty(&dquot->dq_free));
623 #endif
624         put_dquot_last(dquot);
625         spin_unlock(&dq_list_lock);
626 }
627
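/* Allocate and initialize a new dquot structure; the caller gets the only reference (dq_count == 1) */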
628 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
629 {
630         struct dquot *dquot;
631
632         dquot = kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
633         if(!dquot)
634                 return NODQUOT;
635
636         mutex_init(&dquot->dq_lock);
637         INIT_LIST_HEAD(&dquot->dq_free);
638         INIT_LIST_HEAD(&dquot->dq_inuse);
639         INIT_HLIST_NODE(&dquot->dq_hash);
640         INIT_LIST_HEAD(&dquot->dq_dirty);
641         init_waitqueue_head(&dquot->dq_wait_unused);
642         dquot->dq_sb = sb;
643         dquot->dq_type = type;
644         atomic_set(&dquot->dq_count, 1);
645
646         return dquot;
647 }
648
649 /*
650  * Get reference to dquot
651  * MUST be called with either dqptr_sem or dqonoff_mutex held
652  */
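/* References obtained here are dropped with dqput() - see e.g. dquot_initialize() and dquot_transfer() below */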
653 static struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
654 {
655         unsigned int hashent = hashfn(sb, id, type);
656         struct dquot *dquot, *empty = NODQUOT;
657
658         if (!sb_has_quota_enabled(sb, type))
659                 return NODQUOT;
660 we_slept:
661         spin_lock(&dq_list_lock);
662         if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
663                 if (empty == NODQUOT) {
664                         spin_unlock(&dq_list_lock);
665                         if ((empty = get_empty_dquot(sb, type)) == NODQUOT)
666                                 schedule();     /* Try to wait for a moment... */
667                         goto we_slept;
668                 }
669                 dquot = empty;
670                 dquot->dq_id = id;
671                 /* all dquots go on the inuse_list */
672                 put_inuse(dquot);
673                 /* hash it first so it can be found */
674                 insert_dquot_hash(dquot);
675                 dqstats.lookups++;
676                 spin_unlock(&dq_list_lock);
677         } else {
678                 if (!atomic_read(&dquot->dq_count))
679                         remove_free_dquot(dquot);
680                 atomic_inc(&dquot->dq_count);
681                 dqstats.cache_hits++;
682                 dqstats.lookups++;
683                 spin_unlock(&dq_list_lock);
684                 if (empty)
685                         kmem_cache_free(dquot_cachep, empty);
686         }
687         /* Wait for dq_lock - after this we know that either dquot_release() has already
688          * finished or it will be canceled due to the dq_count > 1 test */
689         wait_on_dquot(dquot);
690         /* Read the dquot and instantiate it (everything done only if needed) */
691         if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
692                 dqput(dquot);
693                 return NODQUOT;
694         }
695 #ifdef __DQUOT_PARANOIA
696         BUG_ON(!dquot->dq_sb);  /* Has somebody invalidated entry under us? */
697 #endif
698
699         return dquot;
700 }
701
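/* Does the inode still need a dquot reference of the given type (any type when type == -1)? */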
702 static int dqinit_needed(struct inode *inode, int type)
703 {
704         int cnt;
705
706         if (IS_NOQUOTA(inode))
707                 return 0;
708         if (type != -1)
709                 return inode->i_dquot[type] == NODQUOT;
710         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
711                 if (inode->i_dquot[cnt] == NODQUOT)
712                         return 1;
713         return 0;
714 }
715
716 /* This routine is guarded by the dqonoff_mutex */
717 static void add_dquot_ref(struct super_block *sb, int type)
718 {
719         struct inode *inode, *old_inode = NULL;
720
721         spin_lock(&inode_lock);
722         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
723                 if (!atomic_read(&inode->i_writecount))
724                         continue;
725                 if (!dqinit_needed(inode, type))
726                         continue;
727                 if (inode->i_state & (I_FREEING|I_WILL_FREE))
728                         continue;
729
730                 __iget(inode);
731                 spin_unlock(&inode_lock);
732
733                 iput(old_inode);
734                 sb->dq_op->initialize(inode, type);
735                 /* We hold a reference to 'inode' so it couldn't have been
736                  * removed from s_inodes list while we dropped the inode_lock.
737                  * We cannot iput the inode now as we can be holding the last
738                  * reference and we cannot iput it under inode_lock. So we
739                  * keep the reference and iput it later. */
740                 old_inode = inode;
741                 spin_lock(&inode_lock);
742         }
743         spin_unlock(&inode_lock);
744         iput(old_inode);
745 }
746
747 /* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
748 static inline int dqput_blocks(struct dquot *dquot)
749 {
750         if (atomic_read(&dquot->dq_count) <= 1)
751                 return 1;
752         return 0;
753 }
754
755 /* Remove references to dquots from inode - add dquot to list for freeing if needed */
756 /* We can't race with anybody because we hold dqptr_sem for writing... */
757 static int remove_inode_dquot_ref(struct inode *inode, int type,
758                                   struct list_head *tofree_head)
759 {
760         struct dquot *dquot = inode->i_dquot[type];
761
762         inode->i_dquot[type] = NODQUOT;
763         if (dquot != NODQUOT) {
764                 if (dqput_blocks(dquot)) {
765 #ifdef __DQUOT_PARANOIA
766                         if (atomic_read(&dquot->dq_count) != 1)
767                                 printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
768 #endif
769                         spin_lock(&dq_list_lock);
770                         list_add(&dquot->dq_free, tofree_head); /* As the dquot currently has users it can't be on the free list... */
771                         spin_unlock(&dq_list_lock);
772                         return 1;
773                 }
774                 else
775                         dqput(dquot);   /* We have guaranteed we won't block */
776         }
777         return 0;
778 }
779
780 /* Free list of dquots - called from inode.c */
781 /* dquots are removed from inodes; no new references can be taken so we are the only ones holding a reference */
782 static void put_dquot_list(struct list_head *tofree_head)
783 {
784         struct list_head *act_head;
785         struct dquot *dquot;
786
787         act_head = tofree_head->next;
788         /* So now we have dquots on the list... Just free them */
789         while (act_head != tofree_head) {
790                 dquot = list_entry(act_head, struct dquot, dq_free);
791                 act_head = act_head->next;
792                 list_del_init(&dquot->dq_free); /* Remove dquot from the list so we won't have problems... */
793                 dqput(dquot);
794         }
795 }
796
797 static void remove_dquot_ref(struct super_block *sb, int type,
798                 struct list_head *tofree_head)
799 {
800         struct inode *inode;
801
802         spin_lock(&inode_lock);
803         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
804                 if (!IS_NOQUOTA(inode))
805                         remove_inode_dquot_ref(inode, type, tofree_head);
806         }
807         spin_unlock(&inode_lock);
808 }
809
810 /* Gather all references from inodes and drop them */
811 static void drop_dquot_ref(struct super_block *sb, int type)
812 {
813         LIST_HEAD(tofree_head);
814
815         if (sb->dq_op) {
816                 down_write(&sb_dqopt(sb)->dqptr_sem);
817                 remove_dquot_ref(sb, type, &tofree_head);
818                 up_write(&sb_dqopt(sb)->dqptr_sem);
819                 put_dquot_list(&tofree_head);
820         }
821 }
822
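/*
 * The helpers below adjust the usage counters in dquot->dq_dqb. Callers hold
 * dq_data_lock and do the limit checking (check_idq()/check_bdq()) themselves.
 */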
823 static inline void dquot_incr_inodes(struct dquot *dquot, unsigned long number)
824 {
825         dquot->dq_dqb.dqb_curinodes += number;
826 }
827
828 static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
829 {
830         dquot->dq_dqb.dqb_curspace += number;
831 }
832
833 static inline void dquot_decr_inodes(struct dquot *dquot, unsigned long number)
834 {
835         if (dquot->dq_dqb.dqb_curinodes > number)
836                 dquot->dq_dqb.dqb_curinodes -= number;
837         else
838                 dquot->dq_dqb.dqb_curinodes = 0;
839         if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
840                 dquot->dq_dqb.dqb_itime = (time_t) 0;
841         clear_bit(DQ_INODES_B, &dquot->dq_flags);
842 }
843
844 static inline void dquot_decr_space(struct dquot *dquot, qsize_t number)
845 {
846         if (dquot->dq_dqb.dqb_curspace > number)
847                 dquot->dq_dqb.dqb_curspace -= number;
848         else
849                 dquot->dq_dqb.dqb_curspace = 0;
850         if (toqb(dquot->dq_dqb.dqb_curspace) <= dquot->dq_dqb.dqb_bsoftlimit)
851                 dquot->dq_dqb.dqb_btime = (time_t) 0;
852         clear_bit(DQ_BLKS_B, &dquot->dq_flags);
853 }
854
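/*
 * Has a hardlimit or "exceeded too long" warning of this kind already been
 * issued for this dquot? Marks it as issued so it is sent at most once (the
 * bit is cleared again when usage drops back below the soft limit).
 */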
855 static int warning_issued(struct dquot *dquot, const int warntype)
856 {
857         int flag = (warntype == QUOTA_NL_BHARDWARN ||
858                 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
859                 ((warntype == QUOTA_NL_IHARDWARN ||
860                 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
861
862         if (!flag)
863                 return 0;
864         return test_and_set_bit(flag, &dquot->dq_flags);
865 }
866
867 #ifdef CONFIG_PRINT_QUOTA_WARNING
868 static int flag_print_warnings = 1;
869
870 static inline int need_print_warning(struct dquot *dquot)
871 {
872         if (!flag_print_warnings)
873                 return 0;
874
875         switch (dquot->dq_type) {
876                 case USRQUOTA:
877                         return current->fsuid == dquot->dq_id;
878                 case GRPQUOTA:
879                         return in_group_p(dquot->dq_id);
880         }
881         return 0;
882 }
883
884 /* Print a warning to the user who exceeded the quota */
885 static void print_warning(struct dquot *dquot, const int warntype)
886 {
887         char *msg = NULL;
888         struct tty_struct *tty;
889
890         if (warntype == QUOTA_NL_IHARDBELOW ||
891             warntype == QUOTA_NL_ISOFTBELOW ||
892             warntype == QUOTA_NL_BHARDBELOW ||
893             warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(dquot))
894                 return;
895
896         tty = get_current_tty();
897         if (!tty)
898                 return;
899         tty_write_message(tty, dquot->dq_sb->s_id);
900         if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
901                 tty_write_message(tty, ": warning, ");
902         else
903                 tty_write_message(tty, ": write failed, ");
904         tty_write_message(tty, quotatypes[dquot->dq_type]);
905         switch (warntype) {
906                 case QUOTA_NL_IHARDWARN:
907                         msg = " file limit reached.\r\n";
908                         break;
909                 case QUOTA_NL_ISOFTLONGWARN:
910                         msg = " file quota exceeded too long.\r\n";
911                         break;
912                 case QUOTA_NL_ISOFTWARN:
913                         msg = " file quota exceeded.\r\n";
914                         break;
915                 case QUOTA_NL_BHARDWARN:
916                         msg = " block limit reached.\r\n";
917                         break;
918                 case QUOTA_NL_BSOFTLONGWARN:
919                         msg = " block quota exceeded too long.\r\n";
920                         break;
921                 case QUOTA_NL_BSOFTWARN:
922                         msg = " block quota exceeded.\r\n";
923                         break;
924         }
925         tty_write_message(tty, msg);
926         tty_kref_put(tty);
927 }
928 #endif
929
930 #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
931
932 /* Netlink family structure for quota */
933 static struct genl_family quota_genl_family = {
934         .id = GENL_ID_GENERATE,
935         .hdrsize = 0,
936         .name = "VFS_DQUOT",
937         .version = 1,
938         .maxattr = QUOTA_NL_A_MAX,
939 };
940
941 /* Send a warning to userspace about the user who exceeded the quota */
942 static void send_warning(const struct dquot *dquot, const char warntype)
943 {
944         static atomic_t seq;
945         struct sk_buff *skb;
946         void *msg_head;
947         int ret;
948         int msg_size = 4 * nla_total_size(sizeof(u32)) +
949                        2 * nla_total_size(sizeof(u64));
950
951         /* We have to allocate using GFP_NOFS as we are called from a
952          * filesystem performing write and thus further recursion into
953          * the fs to free some data could cause deadlocks. */
954         skb = genlmsg_new(msg_size, GFP_NOFS);
955         if (!skb) {
956                 printk(KERN_ERR
957                   "VFS: Not enough memory to send quota warning.\n");
958                 return;
959         }
960         msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
961                         &quota_genl_family, 0, QUOTA_NL_C_WARNING);
962         if (!msg_head) {
963                 printk(KERN_ERR
964                   "VFS: Cannot store netlink header in quota warning.\n");
965                 goto err_out;
966         }
967         ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, dquot->dq_type);
968         if (ret)
969                 goto attr_err_out;
970         ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, dquot->dq_id);
971         if (ret)
972                 goto attr_err_out;
973         ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype);
974         if (ret)
975                 goto attr_err_out;
976         ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR,
977                 MAJOR(dquot->dq_sb->s_dev));
978         if (ret)
979                 goto attr_err_out;
980         ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR,
981                 MINOR(dquot->dq_sb->s_dev));
982         if (ret)
983                 goto attr_err_out;
984         ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current->user->uid);
985         if (ret)
986                 goto attr_err_out;
987         genlmsg_end(skb, msg_head);
988
989         ret = genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS);
990         if (ret < 0 && ret != -ESRCH)
991                 printk(KERN_ERR
992                         "VFS: Failed to send notification message: %d\n", ret);
993         return;
994 attr_err_out:
995         printk(KERN_ERR "VFS: Not enough space to compose quota message!\n");
996 err_out:
997         kfree_skb(skb);
998 }
999 #endif
1000
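/* Issue the collected warnings (console and/or netlink) for dquots that have not had them issued yet */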
1001 static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
1002 {
1003         int i;
1004
1005         for (i = 0; i < MAXQUOTAS; i++)
1006                 if (dquots[i] != NODQUOT && warntype[i] != QUOTA_NL_NOWARN &&
1007                     !warning_issued(dquots[i], warntype[i])) {
1008 #ifdef CONFIG_PRINT_QUOTA_WARNING
1009                         print_warning(dquots[i], warntype[i]);
1010 #endif
1011 #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
1012                         send_warning(dquots[i], warntype[i]);
1013 #endif
1014                 }
1015 }
1016
1017 static inline char ignore_hardlimit(struct dquot *dquot)
1018 {
1019         struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
1020
1021         return capable(CAP_SYS_RESOURCE) &&
1022             (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
1023 }
1024
1025 /* needs dq_data_lock */
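/* Check whether 'inodes' more inodes fit within the inode limits; returns QUOTA_OK or NO_QUOTA and sets *warntype accordingly */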
1026 static int check_idq(struct dquot *dquot, ulong inodes, char *warntype)
1027 {
1028         *warntype = QUOTA_NL_NOWARN;
1029         if (inodes <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
1030                 return QUOTA_OK;
1031
1032         if (dquot->dq_dqb.dqb_ihardlimit &&
1033            (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
1034             !ignore_hardlimit(dquot)) {
1035                 *warntype = QUOTA_NL_IHARDWARN;
1036                 return NO_QUOTA;
1037         }
1038
1039         if (dquot->dq_dqb.dqb_isoftlimit &&
1040            (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
1041             dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
1042             !ignore_hardlimit(dquot)) {
1043                 *warntype = QUOTA_NL_ISOFTLONGWARN;
1044                 return NO_QUOTA;
1045         }
1046
1047         if (dquot->dq_dqb.dqb_isoftlimit &&
1048            (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
1049             dquot->dq_dqb.dqb_itime == 0) {
1050                 *warntype = QUOTA_NL_ISOFTWARN;
1051                 dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
1052         }
1053
1054         return QUOTA_OK;
1055 }
1056
1057 /* needs dq_data_lock */
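/* Check whether 'space' more bytes can be allocated within the block limits; returns QUOTA_OK or NO_QUOTA and sets *warntype accordingly */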
1058 static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
1059 {
1060         *warntype = QUOTA_NL_NOWARN;
1061         if (space <= 0 || test_bit(DQ_FAKE_B, &dquot->dq_flags))
1062                 return QUOTA_OK;
1063
1064         if (dquot->dq_dqb.dqb_bhardlimit &&
1065            toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bhardlimit &&
1066             !ignore_hardlimit(dquot)) {
1067                 if (!prealloc)
1068                         *warntype = QUOTA_NL_BHARDWARN;
1069                 return NO_QUOTA;
1070         }
1071
1072         if (dquot->dq_dqb.dqb_bsoftlimit &&
1073            toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
1074             dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
1075             !ignore_hardlimit(dquot)) {
1076                 if (!prealloc)
1077                         *warntype = QUOTA_NL_BSOFTLONGWARN;
1078                 return NO_QUOTA;
1079         }
1080
1081         if (dquot->dq_dqb.dqb_bsoftlimit &&
1082            toqb(dquot->dq_dqb.dqb_curspace + space) > dquot->dq_dqb.dqb_bsoftlimit &&
1083             dquot->dq_dqb.dqb_btime == 0) {
1084                 if (!prealloc) {
1085                         *warntype = QUOTA_NL_BSOFTWARN;
1086                         dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
1087                 }
1088                 else
1089                         /*
1090                          * We don't allow preallocation to exceed the softlimit, so exceeding it
1091                          * will always be reported
1092                          */
1093                         return NO_QUOTA;
1094         }
1095
1096         return QUOTA_OK;
1097 }
1098
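/* Which 'below limit' warning (if any) to send when 'inodes' inodes are about to be freed */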
1099 static int info_idq_free(struct dquot *dquot, ulong inodes)
1100 {
1101         if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1102             dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1103                 return QUOTA_NL_NOWARN;
1104
1105         if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit)
1106                 return QUOTA_NL_ISOFTBELOW;
1107         if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1108             dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit)
1109                 return QUOTA_NL_IHARDBELOW;
1110         return QUOTA_NL_NOWARN;
1111 }
1112
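/* Which 'below limit' warning (if any) to send when 'space' bytes are about to be freed */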
1113 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1114 {
1115         if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1116             toqb(dquot->dq_dqb.dqb_curspace) <= dquot->dq_dqb.dqb_bsoftlimit)
1117                 return QUOTA_NL_NOWARN;
1118
1119         if (toqb(dquot->dq_dqb.dqb_curspace - space) <=
1120             dquot->dq_dqb.dqb_bsoftlimit)
1121                 return QUOTA_NL_BSOFTBELOW;
1122         if (toqb(dquot->dq_dqb.dqb_curspace) >= dquot->dq_dqb.dqb_bhardlimit &&
1123             toqb(dquot->dq_dqb.dqb_curspace - space) <
1124                                                 dquot->dq_dqb.dqb_bhardlimit)
1125                 return QUOTA_NL_BHARDBELOW;
1126         return QUOTA_NL_NOWARN;
1127 }
1128 /*
1129  *      Initialize quota pointers in inode
1130  *      Transaction must be started at entry
1131  */
1132 int dquot_initialize(struct inode *inode, int type)
1133 {
1134         unsigned int id = 0;
1135         int cnt, ret = 0;
1136
1137         /* First test before acquiring mutex - solves deadlocks when we
1138          * re-enter the quota code and are already holding the mutex */
1139         if (IS_NOQUOTA(inode))
1140                 return 0;
1141         down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1142         /* Holding dqptr_sem we know the S_NOQUOTA flag can't be altered... */
1143         if (IS_NOQUOTA(inode))
1144                 goto out_err;
1145         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1146                 if (type != -1 && cnt != type)
1147                         continue;
1148                 if (inode->i_dquot[cnt] == NODQUOT) {
1149                         switch (cnt) {
1150                                 case USRQUOTA:
1151                                         id = inode->i_uid;
1152                                         break;
1153                                 case GRPQUOTA:
1154                                         id = inode->i_gid;
1155                                         break;
1156                         }
1157                         inode->i_dquot[cnt] = dqget(inode->i_sb, id, cnt);
1158                 }
1159         }
1160 out_err:
1161         up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1162         return ret;
1163 }
1164
1165 /*
1166  *      Release all quotas referenced by inode
1167  *      Transaction must be started at entry
1168  */
1169 int dquot_drop(struct inode *inode)
1170 {
1171         int cnt;
1172
1173         down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1174         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1175                 if (inode->i_dquot[cnt] != NODQUOT) {
1176                         dqput(inode->i_dquot[cnt]);
1177                         inode->i_dquot[cnt] = NODQUOT;
1178                 }
1179         }
1180         up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1181         return 0;
1182 }
1183
1184 /* Wrapper to remove references to quota structures from inode */
1185 void vfs_dq_drop(struct inode *inode)
1186 {
1187         /* Here we can get arbitrary inode from clear_inode() so we have
1188          * to be careful. OTOH we don't need locking as quota operations
1189          * are allowed to change only at mount time */
1190         if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op
1191             && inode->i_sb->dq_op->drop) {
1192                 int cnt;
1193                 /* Test before calling to rule out calls from proc and such
1194                  * where we are not allowed to block. Note that this is
1195                  * actually a reliable test even without the lock - the caller
1196                  * must ensure that nobody can come after the DQUOT_DROP and
1197                  * add quota pointers back anyway */
1198                 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1199                         if (inode->i_dquot[cnt] != NODQUOT)
1200                                 break;
1201                 if (cnt < MAXQUOTAS)
1202                         inode->i_sb->dq_op->drop(inode);
1203         }
1204 }
1205
1206 /*
1207  * The following four functions update the i_blocks+i_bytes fields and the
1208  * quota information (together with the appropriate checks).
1209  * NOTE: We absolutely rely on the fact that the caller dirties
1210  * the inode (usually the macros in quotaops.h take care of this) and
1211  * holds a handle for the current transaction so that dquot write and
1212  * inode write go into the same transaction.
1213  */
1214
1215 /*
1216  * This operation can block, but only after everything is updated
1217  */
1218 int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
1219 {
1220         int cnt, ret = NO_QUOTA;
1221         char warntype[MAXQUOTAS];
1222
1223         /* First test before acquiring mutex - solves deadlocks when we
1224          * re-enter the quota code and are already holding the mutex */
1225         if (IS_NOQUOTA(inode)) {
1226 out_add:
1227                 inode_add_bytes(inode, number);
1228                 return QUOTA_OK;
1229         }
1230         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1231                 warntype[cnt] = QUOTA_NL_NOWARN;
1232
1233         down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1234         if (IS_NOQUOTA(inode)) {        /* Now we can do reliable test... */
1235                 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1236                 goto out_add;
1237         }
1238         spin_lock(&dq_data_lock);
1239         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1240                 if (inode->i_dquot[cnt] == NODQUOT)
1241                         continue;
1242                 if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
1243                         goto warn_put_all;
1244         }
1245         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1246                 if (inode->i_dquot[cnt] == NODQUOT)
1247                         continue;
1248                 dquot_incr_space(inode->i_dquot[cnt], number);
1249         }
1250         inode_add_bytes(inode, number);
1251         ret = QUOTA_OK;
1252 warn_put_all:
1253         spin_unlock(&dq_data_lock);
1254         if (ret == QUOTA_OK)
1255                 /* Dirtify all the dquots - this can block when journalling */
1256                 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1257                         if (inode->i_dquot[cnt])
1258                                 mark_dquot_dirty(inode->i_dquot[cnt]);
1259         flush_warnings(inode->i_dquot, warntype);
1260         up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1261         return ret;
1262 }
1263
1264 /*
1265  * This operation can block, but only after everything is updated
1266  */
1267 int dquot_alloc_inode(const struct inode *inode, unsigned long number)
1268 {
1269         int cnt, ret = NO_QUOTA;
1270         char warntype[MAXQUOTAS];
1271
1272         /* First test before acquiring mutex - solves deadlocks when we
1273          * re-enter the quota code and are already holding the mutex */
1274         if (IS_NOQUOTA(inode))
1275                 return QUOTA_OK;
1276         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1277                 warntype[cnt] = QUOTA_NL_NOWARN;
1278         down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1279         if (IS_NOQUOTA(inode)) {
1280                 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1281                 return QUOTA_OK;
1282         }
1283         spin_lock(&dq_data_lock);
1284         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1285                 if (inode->i_dquot[cnt] == NODQUOT)
1286                         continue;
1287                 if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
1288                         goto warn_put_all;
1289         }
1290
1291         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1292                 if (inode->i_dquot[cnt] == NODQUOT)
1293                         continue;
1294                 dquot_incr_inodes(inode->i_dquot[cnt], number);
1295         }
1296         ret = QUOTA_OK;
1297 warn_put_all:
1298         spin_unlock(&dq_data_lock);
1299         if (ret == QUOTA_OK)
1300                 /* Dirtify all the dquots - this can block when journalling */
1301                 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1302                         if (inode->i_dquot[cnt])
1303                                 mark_dquot_dirty(inode->i_dquot[cnt]);
1304         flush_warnings(inode->i_dquot, warntype);
1305         up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1306         return ret;
1307 }
1308
1309 /*
1310  * This operation can block, but only after everything is updated
1311  */
1312 int dquot_free_space(struct inode *inode, qsize_t number)
1313 {
1314         unsigned int cnt;
1315         char warntype[MAXQUOTAS];
1316
1317         /* First test before acquiring mutex - solves deadlocks when we
1318          * re-enter the quota code and are already holding the mutex */
1319         if (IS_NOQUOTA(inode)) {
1320 out_sub:
1321                 inode_sub_bytes(inode, number);
1322                 return QUOTA_OK;
1323         }
1324
1325         down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1326         /* Now recheck reliably when holding dqptr_sem */
1327         if (IS_NOQUOTA(inode)) {
1328                 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1329                 goto out_sub;
1330         }
1331         spin_lock(&dq_data_lock);
1332         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1333                 if (inode->i_dquot[cnt] == NODQUOT)
1334                         continue;
1335                 warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number);
1336                 dquot_decr_space(inode->i_dquot[cnt], number);
1337         }
1338         inode_sub_bytes(inode, number);
1339         spin_unlock(&dq_data_lock);
1340         /* Dirtify all the dquots - this can block when journalling */
1341         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1342                 if (inode->i_dquot[cnt])
1343                         mark_dquot_dirty(inode->i_dquot[cnt]);
1344         flush_warnings(inode->i_dquot, warntype);
1345         up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1346         return QUOTA_OK;
1347 }
1348
1349 /*
1350  * This operation can block, but only after everything is updated
1351  */
1352 int dquot_free_inode(const struct inode *inode, unsigned long number)
1353 {
1354         unsigned int cnt;
1355         char warntype[MAXQUOTAS];
1356
1357         /* First test before acquiring mutex - solves deadlocks when we
1358          * re-enter the quota code and are already holding the mutex */
1359         if (IS_NOQUOTA(inode))
1360                 return QUOTA_OK;
1361
1362         down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1363         /* Now recheck reliably when holding dqptr_sem */
1364         if (IS_NOQUOTA(inode)) {
1365                 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1366                 return QUOTA_OK;
1367         }
1368         spin_lock(&dq_data_lock);
1369         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1370                 if (inode->i_dquot[cnt] == NODQUOT)
1371                         continue;
1372                 warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number);
1373                 dquot_decr_inodes(inode->i_dquot[cnt], number);
1374         }
1375         spin_unlock(&dq_data_lock);
1376         /* Dirtify all the dquots - this can block when journalling */
1377         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1378                 if (inode->i_dquot[cnt])
1379                         mark_dquot_dirty(inode->i_dquot[cnt]);
1380         flush_warnings(inode->i_dquot, warntype);
1381         up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
1382         return QUOTA_OK;
1383 }
1384
1385 /*
1386  * Transfer the number of inodes and blocks from one diskquota to another.
1387  *
1388  * This operation can block, but only after everything is updated
1389  * A transaction must be started when entering this function.
1390  */
1391 int dquot_transfer(struct inode *inode, struct iattr *iattr)
1392 {
1393         qsize_t space;
1394         struct dquot *transfer_from[MAXQUOTAS];
1395         struct dquot *transfer_to[MAXQUOTAS];
1396         int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
1397             chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
1398         char warntype_to[MAXQUOTAS];
1399         char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
1400
1401         /* First test before acquiring dqptr_sem - solves deadlocks when we
1402          * re-enter the quota code and are already holding the semaphore */
1403         if (IS_NOQUOTA(inode))
1404                 return QUOTA_OK;
1405         /* Clear the arrays */
1406         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1407                 transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
1408                 warntype_to[cnt] = warntype_from_inodes[cnt] = warntype_from_space[cnt] = QUOTA_NL_NOWARN;
1409         }
1410         down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1411         /* Now recheck reliably when holding dqptr_sem */
1412         if (IS_NOQUOTA(inode)) {        /* File without quota accounting? */
1413                 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1414                 return QUOTA_OK;
1415         }
1416         /* First build the transfer_to list - here we can block on
1417          * reading/instantiating dquots.  We know that the transaction was
1418          * already started for us, so we don't violate lock ranking here */
1419         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1420                 switch (cnt) {
1421                         case USRQUOTA:
1422                                 if (!chuid)
1423                                         continue;
1424                                 transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
1425                                 break;
1426                         case GRPQUOTA:
1427                                 if (!chgid)
1428                                         continue;
1429                                 transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
1430                                 break;
1431                 }
1432         }
1433         spin_lock(&dq_data_lock);
1434         space = inode_get_bytes(inode);
1435         /* Build the transfer_from list and check the limits */
1436         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1437                 if (transfer_to[cnt] == NODQUOT)
1438                         continue;
1439                 transfer_from[cnt] = inode->i_dquot[cnt];
1440                 if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
1441                     NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
1442                     warntype_to + cnt) == NO_QUOTA)
1443                         goto warn_put_all;
1444         }
1445
1446         /*
1447          * Finally perform the needed transfer from transfer_from to transfer_to
1448          */
1449         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1450                 /*
1451                  * Skip changes for the same uid or gid, or when this quota type is turned off.
1452                  */
1453                 if (transfer_to[cnt] == NODQUOT)
1454                         continue;
1455
1456                 /* Due to an IO error we might not have a transfer_from[] structure */
1457                 if (transfer_from[cnt]) {
1458                         warntype_from_inodes[cnt] =
1459                                 info_idq_free(transfer_from[cnt], 1);
1460                         warntype_from_space[cnt] =
1461                                 info_bdq_free(transfer_from[cnt], space);
1462                         dquot_decr_inodes(transfer_from[cnt], 1);
1463                         dquot_decr_space(transfer_from[cnt], space);
1464                 }
1465
1466                 dquot_incr_inodes(transfer_to[cnt], 1);
1467                 dquot_incr_space(transfer_to[cnt], space);
1468
1469                 inode->i_dquot[cnt] = transfer_to[cnt];
1470         }
1471         ret = QUOTA_OK;
1472 warn_put_all:
1473         spin_unlock(&dq_data_lock);
1474         /* Dirtify all the dquots - this can block when journalling */
1475         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1476                 if (transfer_from[cnt])
1477                         mark_dquot_dirty(transfer_from[cnt]);
1478                 if (transfer_to[cnt])
1479                         mark_dquot_dirty(transfer_to[cnt]);
1480         }
1481         flush_warnings(transfer_to, warntype_to);
1482         flush_warnings(transfer_from, warntype_from_inodes);
1483         flush_warnings(transfer_from, warntype_from_space);
1484
1485         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1486                 if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
1487                         dqput(transfer_from[cnt]);
1488                 if (ret == NO_QUOTA && transfer_to[cnt] != NODQUOT)
1489                         dqput(transfer_to[cnt]);
1490         }
1491         up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
1492         return ret;
1493 }
1494
1495 /* Wrapper for transferring ownership of an inode */
1496 int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
1497 {
1498         if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) {
1499                 vfs_dq_init(inode);
1500                 if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA)
1501                         return 1;
1502         }
1503         return 0;
1504 }
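
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how a filesystem's ->setattr method would typically hand a uid/gid change
 * to the wrapper above before updating the inode itself.  my_fs_setattr()
 * is a hypothetical name; note that for journalled quota the transaction
 * must already be open when ->transfer runs (see the comment above
 * dquot_transfer()), and a failed transfer is reported as -EDQUOT.
 *
 *	static int my_fs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int error = inode_change_ok(inode, attr);
 *
 *		if (error)
 *			return error;
 *		if (((attr->ia_valid & ATTR_UID) && attr->ia_uid != inode->i_uid) ||
 *		    ((attr->ia_valid & ATTR_GID) && attr->ia_gid != inode->i_gid)) {
 *			if (vfs_dq_transfer(inode, attr))
 *				return -EDQUOT;	// new owner is over quota
 *		}
 *		return inode_setattr(inode, attr);
 *	}
 */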
1505
1506
1507 /*
1508  * Write the quota file's info structure to disk
1509  */
1510 int dquot_commit_info(struct super_block *sb, int type)
1511 {
1512         int ret;
1513         struct quota_info *dqopt = sb_dqopt(sb);
1514
1515         mutex_lock(&dqopt->dqio_mutex);
1516         ret = dqopt->ops[type]->write_file_info(sb, type);
1517         mutex_unlock(&dqopt->dqio_mutex);
1518         return ret;
1519 }
1520
1521 /*
1522  * Definitions of diskquota operations.
1523  */
1524 struct dquot_operations dquot_operations = {
1525         .initialize     = dquot_initialize,
1526         .drop           = dquot_drop,
1527         .alloc_space    = dquot_alloc_space,
1528         .alloc_inode    = dquot_alloc_inode,
1529         .free_space     = dquot_free_space,
1530         .free_inode     = dquot_free_inode,
1531         .transfer       = dquot_transfer,
1532         .write_dquot    = dquot_commit,
1533         .acquire_dquot  = dquot_acquire,
1534         .release_dquot  = dquot_release,
1535         .mark_dirty     = dquot_mark_dquot_dirty,
1536         .write_info     = dquot_commit_info
1537 };
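
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * a filesystem opts into this generic implementation by pointing its
 * super_block at the tables defined in this file from its fill_super
 * routine; it must also provide ->quota_read and ->quota_write in its
 * super_operations (vfs_quota_on_inode() below refuses to enable quota
 * without them).  my_fs_fill_super() is a hypothetical name.
 *
 *	static int my_fs_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		// ... usual superblock setup ...
 *		sb->dq_op = &dquot_operations;	// per-inode charge/release hooks
 *		sb->s_qcop = &vfs_quotactl_ops;	// quotactl(2) entry points
 *		// ...
 *		return 0;
 *	}
 */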
1538
1539 static inline void set_enable_flags(struct quota_info *dqopt, int type)
1540 {
1541         switch (type) {
1542                 case USRQUOTA:
1543                         dqopt->flags |= DQUOT_USR_ENABLED;
1544                         dqopt->flags &= ~DQUOT_USR_SUSPENDED;
1545                         break;
1546                 case GRPQUOTA:
1547                         dqopt->flags |= DQUOT_GRP_ENABLED;
1548                         dqopt->flags &= ~DQUOT_GRP_SUSPENDED;
1549                         break;
1550         }
1551 }
1552
1553 static inline void reset_enable_flags(struct quota_info *dqopt, int type,
1554                                       int remount)
1555 {
1556         switch (type) {
1557                 case USRQUOTA:
1558                         dqopt->flags &= ~DQUOT_USR_ENABLED;
1559                         if (remount)
1560                                 dqopt->flags |= DQUOT_USR_SUSPENDED;
1561                         else
1562                                 dqopt->flags &= ~DQUOT_USR_SUSPENDED;
1563                         break;
1564                 case GRPQUOTA:
1565                         dqopt->flags &= ~DQUOT_GRP_ENABLED;
1566                         if (remount)
1567                                 dqopt->flags |= DQUOT_GRP_SUSPENDED;
1568                         else
1569                                 dqopt->flags &= ~DQUOT_GRP_SUSPENDED;
1570                         break;
1571         }
1572 }
1573
1574
1575 /*
1576  * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
1577  */
1578 int vfs_quota_off(struct super_block *sb, int type, int remount)
1579 {
1580         int cnt, ret = 0;
1581         struct quota_info *dqopt = sb_dqopt(sb);
1582         struct inode *toputinode[MAXQUOTAS];
1583
1584         /* We need to serialize quota_off() for device */
1585         mutex_lock(&dqopt->dqonoff_mutex);
1586
1587         /*
1588          * Skip everything if there's nothing to do. We have to do this because
1589          * sometimes we are called when fill_super() failed and calling
1590          * sync_fs() in such cases does no good.
1591          */
1592         if (!sb_any_quota_enabled(sb) && !sb_any_quota_suspended(sb)) {
1593                 mutex_unlock(&dqopt->dqonoff_mutex);
1594                 return 0;
1595         }
1596         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1597                 toputinode[cnt] = NULL;
1598                 if (type != -1 && cnt != type)
1599                         continue;
1600                 /* If inodes of the quota files were kept across a remount and
1601                  * quotaoff is now called, drop the kept inodes. */
1602                 if (!remount && sb_has_quota_suspended(sb, cnt)) {
1603                         iput(dqopt->files[cnt]);
1604                         dqopt->files[cnt] = NULL;
1605                         reset_enable_flags(dqopt, cnt, 0);
1606                         continue;
1607                 }
1608                 if (!sb_has_quota_enabled(sb, cnt))
1609                         continue;
1610                 reset_enable_flags(dqopt, cnt, remount);
1611
1612                 /* Note: these are blocking operations */
1613                 drop_dquot_ref(sb, cnt);
1614                 invalidate_dquots(sb, cnt);
1615                 /*
1616                  * Now all dquots should be invalidated and all writes done, so we
1617                  * should be the only users of the info. No locks needed.
1618                  */
1619                 if (info_dirty(&dqopt->info[cnt]))
1620                         sb->dq_op->write_info(sb, cnt);
1621                 if (dqopt->ops[cnt]->free_file_info)
1622                         dqopt->ops[cnt]->free_file_info(sb, cnt);
1623                 put_quota_format(dqopt->info[cnt].dqi_format);
1624
1625                 toputinode[cnt] = dqopt->files[cnt];
1626                 if (!remount)
1627                         dqopt->files[cnt] = NULL;
1628                 dqopt->info[cnt].dqi_flags = 0;
1629                 dqopt->info[cnt].dqi_igrace = 0;
1630                 dqopt->info[cnt].dqi_bgrace = 0;
1631                 dqopt->ops[cnt] = NULL;
1632         }
1633         mutex_unlock(&dqopt->dqonoff_mutex);
1634         /* Sync the superblock so that buffers with quota data are written to
1635          * disk (and so userspace sees correct data afterwards). */
1636         if (sb->s_op->sync_fs)
1637                 sb->s_op->sync_fs(sb, 1);
1638         sync_blockdev(sb->s_bdev);
1639         /* Now the quota files are just ordinary files and we can set the
1640          * inode flags back. Moreover we discard the pagecache so that
1641          * userspace sees the writes we did bypassing the pagecache. We
1642          * must also discard the blockdev buffers so that we see the
1643          * changes done by userspace on the next quotaon() */
1644         for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1645                 if (toputinode[cnt]) {
1646                         mutex_lock(&dqopt->dqonoff_mutex);
1647                         /* If quota was reenabled in the meantime, we have
1648                          * nothing to do */
1649                         if (!sb_has_quota_enabled(sb, cnt)) {
1650                                 mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
1651                                 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
1652                                   S_NOATIME | S_NOQUOTA);
1653                                 truncate_inode_pages(&toputinode[cnt]->i_data, 0);
1654                                 mutex_unlock(&toputinode[cnt]->i_mutex);
1655                                 mark_inode_dirty(toputinode[cnt]);
1656                         }
1657                         mutex_unlock(&dqopt->dqonoff_mutex);
1658                         /* On remount RO, we keep the inode pointer so that we
1659                          * can reenable quota on the subsequent remount RW.
1660                          * But we had better not keep the inode pointer when there
1661                          * is a pending delete on the quota file... */
1662                         if (!remount)
1663                                 iput(toputinode[cnt]);
1664                         else if (!toputinode[cnt]->i_nlink)
1665                                 ret = -EBUSY;
1666                 }
1667         if (sb->s_bdev)
1668                 invalidate_bdev(sb->s_bdev);
1669         return ret;
1670 }
1671
1672 /*
1673  *      Turn quotas on for a device
1674  */
1675
1676 /* Helper function when we already have the inode */
1677 static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
1678 {
1679         struct quota_format_type *fmt = find_quota_format(format_id);
1680         struct super_block *sb = inode->i_sb;
1681         struct quota_info *dqopt = sb_dqopt(sb);
1682         int error;
1683         int oldflags = -1;
1684
1685         if (!fmt)
1686                 return -ESRCH;
1687         if (!S_ISREG(inode->i_mode)) {
1688                 error = -EACCES;
1689                 goto out_fmt;
1690         }
1691         if (IS_RDONLY(inode)) {
1692                 error = -EROFS;
1693                 goto out_fmt;
1694         }
1695         if (!sb->s_op->quota_write || !sb->s_op->quota_read) {
1696                 error = -EINVAL;
1697                 goto out_fmt;
1698         }
1699
1700         /* As we bypass the pagecache we must now flush the inode so that
1701          * we see all the changes from userspace... */
1702         write_inode_now(inode, 1);
1703         /* And now flush the block cache so that kernel sees the changes */
1704         invalidate_bdev(sb->s_bdev);
1705         mutex_lock(&inode->i_mutex);
1706         mutex_lock(&dqopt->dqonoff_mutex);
1707         if (sb_has_quota_enabled(sb, type) ||
1708                         sb_has_quota_suspended(sb, type)) {
1709                 error = -EBUSY;
1710                 goto out_lock;
1711         }
1712         /* We don't want quota or atime updates on the quota files (deadlocks
1713          * are possible). Also nobody should write to the file - we use special
1714          * IO operations which ignore the immutable bit. */
1715         down_write(&dqopt->dqptr_sem);
1716         oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA);
1717         inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
1718         up_write(&dqopt->dqptr_sem);
1719         sb->dq_op->drop(inode);
1720
1721         error = -EIO;
1722         dqopt->files[type] = igrab(inode);
1723         if (!dqopt->files[type])
1724                 goto out_lock;
1725         error = -EINVAL;
1726         if (!fmt->qf_ops->check_quota_file(sb, type))
1727                 goto out_file_init;
1728
1729         dqopt->ops[type] = fmt->qf_ops;
1730         dqopt->info[type].dqi_format = fmt;
1731         dqopt->info[type].dqi_fmt_id = format_id;
1732         INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
1733         mutex_lock(&dqopt->dqio_mutex);
1734         if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
1735                 mutex_unlock(&dqopt->dqio_mutex);
1736                 goto out_file_init;
1737         }
1738         mutex_unlock(&dqopt->dqio_mutex);
1739         mutex_unlock(&inode->i_mutex);
1740         set_enable_flags(dqopt, type);
1741
1742         add_dquot_ref(sb, type);
1743         mutex_unlock(&dqopt->dqonoff_mutex);
1744
1745         return 0;
1746
1747 out_file_init:
1748         dqopt->files[type] = NULL;
1749         iput(inode);
1750 out_lock:
1751         mutex_unlock(&dqopt->dqonoff_mutex);
1752         if (oldflags != -1) {
1753                 down_write(&dqopt->dqptr_sem);
1754                 /* Set the flags back (in the case of an accidental quotaon()
1755                  * on the wrong file we don't want to mess up its flags) */
1756                 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
1757                 inode->i_flags |= oldflags;
1758                 up_write(&dqopt->dqptr_sem);
1759         }
1760         mutex_unlock(&inode->i_mutex);
1761 out_fmt:
1762         put_quota_format(fmt);
1763
1764         return error;
1765 }
1766
1767 /* Reenable quotas on remount RW */
1768 static int vfs_quota_on_remount(struct super_block *sb, int type)
1769 {
1770         struct quota_info *dqopt = sb_dqopt(sb);
1771         struct inode *inode;
1772         int ret;
1773
1774         mutex_lock(&dqopt->dqonoff_mutex);
1775         if (!sb_has_quota_suspended(sb, type)) {
1776                 mutex_unlock(&dqopt->dqonoff_mutex);
1777                 return 0;
1778         }
1779         BUG_ON(sb_has_quota_enabled(sb, type));
1780
1781         inode = dqopt->files[type];
1782         dqopt->files[type] = NULL;
1783         reset_enable_flags(dqopt, type, 0);
1784         mutex_unlock(&dqopt->dqonoff_mutex);
1785
1786         ret = vfs_quota_on_inode(inode, type, dqopt->info[type].dqi_fmt_id);
1787         iput(inode);
1788
1789         return ret;
1790 }
1791
1792 int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
1793                       struct path *path)
1794 {
1795         int error = security_quota_on(path->dentry);
1796         if (error)
1797                 return error;
1798         /* Quota file not on the same filesystem? */
1799         if (path->mnt->mnt_sb != sb)
1800                 error = -EXDEV;
1801         else
1802                 error = vfs_quota_on_inode(path->dentry->d_inode, type,
1803                                            format_id);
1804         return error;
1805 }
1806
1807 /* Actual function called from quotactl() */
1808 int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
1809                  int remount)
1810 {
1811         struct path path;
1812         int error;
1813
1814         if (remount)
1815                 return vfs_quota_on_remount(sb, type);
1816
1817         error = kern_path(name, LOOKUP_FOLLOW, &path);
1818         if (!error) {
1819                 error = vfs_quota_on_path(sb, type, format_id, &path);
1820                 path_put(&path);
1821         }
1822         return error;
1823 }
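
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the user-level call that ends up here.  quotactl(2) with Q_QUOTAON is
 * dispatched through sb->s_qcop->quota_on, i.e. vfs_quota_on() for
 * filesystems using vfs_quotactl_ops; the device and quota-file paths
 * below are examples only.
 *
 *	#include <sys/quota.h>
 *	#include <stdio.h>
 *
 *	// Enable user quotas in vfsv0 format, tracked in /mnt/aquota.user:
 *	if (quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sda1",
 *		     QFMT_VFS_V0, (caddr_t)"/mnt/aquota.user") < 0)
 *		perror("quotactl(Q_QUOTAON)");
 */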
1824
1825 /*
1826  * This function is used when a filesystem needs to initialize quotas
1827  * at mount time.
1828  */
1829 int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
1830                 int format_id, int type)
1831 {
1832         struct dentry *dentry;
1833         int error;
1834
1835         dentry = lookup_one_len(qf_name, sb->s_root, strlen(qf_name));
1836         if (IS_ERR(dentry))
1837                 return PTR_ERR(dentry);
1838
1839         if (!dentry->d_inode) {
1840                 error = -ENOENT;
1841                 goto out;
1842         }
1843
1844         error = security_quota_on(dentry);
1845         if (!error)
1846                 error = vfs_quota_on_inode(dentry->d_inode, type, format_id);
1847
1848 out:
1849         dput(dentry);
1850         return error;
1851 }
1852
1853 /* Wrapper to turn on quotas when remounting rw */
1854 int vfs_dq_quota_on_remount(struct super_block *sb)
1855 {
1856         int cnt;
1857         int ret = 0, err;
1858
1859         if (!sb->s_qcop || !sb->s_qcop->quota_on)
1860                 return -ENOSYS;
1861         for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1862                 err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
1863                 if (err < 0 && !ret)
1864                         ret = err;
1865         }
1866         return ret;
1867 }
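
/*
 * Added commentary: this wrapper ties the two remount halves together.
 * When quota is turned off for a remount (remount != 0 passed to
 * vfs_quota_off()), the enabled quota types are only marked suspended and
 * the quota-file inodes are kept in dqopt->files[].  On the later remount
 * read-write, the loop above calls ->quota_on(sb, cnt, 0, NULL, 1) for
 * every type, which takes the vfs_quota_on_remount() path and re-enables
 * each suspended type using the kept inode and its saved format id.
 */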
1868
1869 /* Generic routine for getting common part of quota structure */
1870 static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
1871 {
1872         struct mem_dqblk *dm = &dquot->dq_dqb;
1873
1874         spin_lock(&dq_data_lock);
1875         di->dqb_bhardlimit = dm->dqb_bhardlimit;
1876         di->dqb_bsoftlimit = dm->dqb_bsoftlimit;
1877         di->dqb_curspace = dm->dqb_curspace;
1878         di->dqb_ihardlimit = dm->dqb_ihardlimit;
1879         di->dqb_isoftlimit = dm->dqb_isoftlimit;
1880         di->dqb_curinodes = dm->dqb_curinodes;
1881         di->dqb_btime = dm->dqb_btime;
1882         di->dqb_itime = dm->dqb_itime;
1883         di->dqb_valid = QIF_ALL;
1884         spin_unlock(&dq_data_lock);
1885 }
1886
1887 int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
1888 {
1889         struct dquot *dquot;
1890
1891         mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1892         if (!(dquot = dqget(sb, id, type))) {
1893                 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1894                 return -ESRCH;
1895         }
1896         do_get_dqblk(dquot, di);
1897         dqput(dquot);
1898         mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1899         return 0;
1900 }
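
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the matching user-level query.  Q_GETQUOTA is dispatched through
 * sb->s_qcop->get_dqblk, i.e. vfs_get_dqblk() above; the uid and device
 * path are examples only.
 *
 *	#include <sys/quota.h>
 *	#include <stdio.h>
 *
 *	struct dqblk q;
 *
 *	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1",
 *		     1000, (caddr_t)&q) == 0)
 *		printf("uid 1000 uses %llu bytes\n",
 *		       (unsigned long long)q.dqb_curspace);
 */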
1901
1902 /* Generic routine for setting common part of quota structure */
1903 static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
1904 {
1905         struct mem_dqblk *dm = &dquot->dq_dqb;
1906         int check_blim = 0, check_ilim = 0;
1907         struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
1908
1909         if ((di->dqb_valid & QIF_BLIMITS &&
1910              (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
1911               di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
1912             (di->dqb_valid & QIF_ILIMITS &&
1913              (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
1914               di->dqb_isoftlimit > dqi->dqi_maxilimit)))
1915                 return -ERANGE;
1916
1917         spin_lock(&dq_data_lock);
1918         if (di->dqb_valid & QIF_SPACE) {
1919                 dm->dqb_curspace = di->dqb_curspace;
1920                 check_blim = 1;
1921         }
1922         if (di->dqb_valid & QIF_BLIMITS) {
1923                 dm->dqb_bsoftlimit = di->dqb_bsoftlimit;
1924                 dm->dqb_bhardlimit = di->dqb_bhardlimit;
1925                 check_blim = 1;
1926         }
1927         if (di->dqb_valid & QIF_INODES) {
1928                 dm->dqb_curinodes = di->dqb_curinodes;
1929                 check_ilim = 1;
1930         }
1931         if (di->dqb_valid & QIF_ILIMITS) {
1932                 dm->dqb_isoftlimit = di->dqb_isoftlimit;
1933                 dm->dqb_ihardlimit = di->dqb_ihardlimit;
1934                 check_ilim = 1;
1935         }
1936         if (di->dqb_valid & QIF_BTIME)
1937                 dm->dqb_btime = di->dqb_btime;
1938         if (di->dqb_valid & QIF_ITIME)
1939                 dm->dqb_itime = di->dqb_itime;
1940
1941         if (check_blim) {
1942                 if (!dm->dqb_bsoftlimit || toqb(dm->dqb_curspace) < dm->dqb_bsoftlimit) {
1943                         dm->dqb_btime = 0;
1944                         clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1945                 }
1946                 else if (!(di->dqb_valid & QIF_BTIME))  /* Set grace only if user hasn't provided his own... */
1947                         dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
1948         }
1949         if (check_ilim) {
1950                 if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
1951                         dm->dqb_itime = 0;
1952                         clear_bit(DQ_INODES_B, &dquot->dq_flags);
1953                 }
1954                 else if (!(di->dqb_valid & QIF_ITIME))  /* Set grace only if user hasn't provided his own... */
1955                         dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
1956         }
1957         if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
1958                 clear_bit(DQ_FAKE_B, &dquot->dq_flags);
1959         else
1960                 set_bit(DQ_FAKE_B, &dquot->dq_flags);
1961         spin_unlock(&dq_data_lock);
1962         mark_dquot_dirty(dquot);
1963
1964         return 0;
1965 }
1966
1967 int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
1968 {
1969         struct dquot *dquot;
1970         int rc;
1971
1972         mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1973         if (!(dquot = dqget(sb, id, type))) {
1974                 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1975                 return -ESRCH;
1976         }
1977         rc = do_set_dqblk(dquot, di);
1978         dqput(dquot);
1979         mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1980         return rc;
1981 }
1982
1983 /* Generic routine for getting common part of quota file information */
1984 int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
1985 {
1986         struct mem_dqinfo *mi;
1987   
1988         mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
1989         if (!sb_has_quota_enabled(sb, type)) {
1990                 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
1991                 return -ESRCH;
1992         }
1993         mi = sb_dqopt(sb)->info + type;
1994         spin_lock(&dq_data_lock);
1995         ii->dqi_bgrace = mi->dqi_bgrace;
1996         ii->dqi_igrace = mi->dqi_igrace;
1997         ii->dqi_flags = mi->dqi_flags & DQF_MASK;
1998         ii->dqi_valid = IIF_ALL;
1999         spin_unlock(&dq_data_lock);
2000         mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2001         return 0;
2002 }
2003
2004 /* Generic routine for setting common part of quota file information */
2005 int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
2006 {
2007         struct mem_dqinfo *mi;
2008
2009         mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
2010         if (!sb_has_quota_enabled(sb, type)) {
2011                 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2012                 return -ESRCH;
2013         }
2014         mi = sb_dqopt(sb)->info + type;
2015         spin_lock(&dq_data_lock);
2016         if (ii->dqi_valid & IIF_BGRACE)
2017                 mi->dqi_bgrace = ii->dqi_bgrace;
2018         if (ii->dqi_valid & IIF_IGRACE)
2019                 mi->dqi_igrace = ii->dqi_igrace;
2020         if (ii->dqi_valid & IIF_FLAGS)
2021                 mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
2022         spin_unlock(&dq_data_lock);
2023         mark_info_dirty(sb, type);
2024         /* Force write to disk */
2025         sb->dq_op->write_info(sb, type);
2026         mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
2027         return 0;
2028 }
2029
2030 struct quotactl_ops vfs_quotactl_ops = {
2031         .quota_on       = vfs_quota_on,
2032         .quota_off      = vfs_quota_off,
2033         .quota_sync     = vfs_quota_sync,
2034         .get_info       = vfs_get_dqinfo,
2035         .set_info       = vfs_set_dqinfo,
2036         .get_dqblk      = vfs_get_dqblk,
2037         .set_dqblk      = vfs_set_dqblk
2038 };
2039
2040 static ctl_table fs_dqstats_table[] = {
2041         {
2042                 .ctl_name       = FS_DQ_LOOKUPS,
2043                 .procname       = "lookups",
2044                 .data           = &dqstats.lookups,
2045                 .maxlen         = sizeof(int),
2046                 .mode           = 0444,
2047                 .proc_handler   = &proc_dointvec,
2048         },
2049         {
2050                 .ctl_name       = FS_DQ_DROPS,
2051                 .procname       = "drops",
2052                 .data           = &dqstats.drops,
2053                 .maxlen         = sizeof(int),
2054                 .mode           = 0444,
2055                 .proc_handler   = &proc_dointvec,
2056         },
2057         {
2058                 .ctl_name       = FS_DQ_READS,
2059                 .procname       = "reads",
2060                 .data           = &dqstats.reads,
2061                 .maxlen         = sizeof(int),
2062                 .mode           = 0444,
2063                 .proc_handler   = &proc_dointvec,
2064         },
2065         {
2066                 .ctl_name       = FS_DQ_WRITES,
2067                 .procname       = "writes",
2068                 .data           = &dqstats.writes,
2069                 .maxlen         = sizeof(int),
2070                 .mode           = 0444,
2071                 .proc_handler   = &proc_dointvec,
2072         },
2073         {
2074                 .ctl_name       = FS_DQ_CACHE_HITS,
2075                 .procname       = "cache_hits",
2076                 .data           = &dqstats.cache_hits,
2077                 .maxlen         = sizeof(int),
2078                 .mode           = 0444,
2079                 .proc_handler   = &proc_dointvec,
2080         },
2081         {
2082                 .ctl_name       = FS_DQ_ALLOCATED,
2083                 .procname       = "allocated_dquots",
2084                 .data           = &dqstats.allocated_dquots,
2085                 .maxlen         = sizeof(int),
2086                 .mode           = 0444,
2087                 .proc_handler   = &proc_dointvec,
2088         },
2089         {
2090                 .ctl_name       = FS_DQ_FREE,
2091                 .procname       = "free_dquots",
2092                 .data           = &dqstats.free_dquots,
2093                 .maxlen         = sizeof(int),
2094                 .mode           = 0444,
2095                 .proc_handler   = &proc_dointvec,
2096         },
2097         {
2098                 .ctl_name       = FS_DQ_SYNCS,
2099                 .procname       = "syncs",
2100                 .data           = &dqstats.syncs,
2101                 .maxlen         = sizeof(int),
2102                 .mode           = 0444,
2103                 .proc_handler   = &proc_dointvec,
2104         },
2105 #ifdef CONFIG_PRINT_QUOTA_WARNING
2106         {
2107                 .ctl_name       = FS_DQ_WARNINGS,
2108                 .procname       = "warnings",
2109                 .data           = &flag_print_warnings,
2110                 .maxlen         = sizeof(int),
2111                 .mode           = 0644,
2112                 .proc_handler   = &proc_dointvec,
2113         },
2114 #endif
2115         { .ctl_name = 0 },
2116 };
2117
2118 static ctl_table fs_table[] = {
2119         {
2120                 .ctl_name       = FS_DQSTATS,
2121                 .procname       = "quota",
2122                 .mode           = 0555,
2123                 .child          = fs_dqstats_table,
2124         },
2125         { .ctl_name = 0 },
2126 };
2127
2128 static ctl_table sys_table[] = {
2129         {
2130                 .ctl_name       = CTL_FS,
2131                 .procname       = "fs",
2132                 .mode           = 0555,
2133                 .child          = fs_table,
2134         },
2135         { .ctl_name = 0 },
2136 };
2137
2138 static int __init dquot_init(void)
2139 {
2140         int i;
2141         unsigned long nr_hash, order;
2142
2143         printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
2144
2145         register_sysctl_table(sys_table);
2146
2147         dquot_cachep = kmem_cache_create("dquot",
2148                         sizeof(struct dquot), sizeof(unsigned long) * 4,
2149                         (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
2150                                 SLAB_MEM_SPREAD|SLAB_PANIC),
2151                         NULL);
2152
2153         order = 0;
2154         dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
2155         if (!dquot_hash)
2156                 panic("Cannot create dquot hash table");
2157
2158         /* Find the largest power-of-two number of hlist_heads that fits into the allocation */
2159         nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
2160         dq_hash_bits = 0;
2161         do {
2162                 dq_hash_bits++;
2163         } while (nr_hash >> dq_hash_bits);
2164         dq_hash_bits--;
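        /*
         * Example (added commentary): with 4096-byte pages and 8-byte
         * hlist_heads, nr_hash starts out as 512, the loop above leaves
         * dq_hash_bits at 9, and the table is sized below to
         * 1 << 9 == 512 buckets with dq_hash_mask == 511.
         */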
2165
2166         nr_hash = 1UL << dq_hash_bits;
2167         dq_hash_mask = nr_hash - 1;
2168         for (i = 0; i < nr_hash; i++)
2169                 INIT_HLIST_HEAD(dquot_hash + i);
2170
2171         printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
2172                         nr_hash, order, (PAGE_SIZE << order));
2173
2174         register_shrinker(&dqcache_shrinker);
2175
2176 #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
2177         if (genl_register_family(&quota_genl_family) != 0)
2178                 printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n");
2179 #endif
2180
2181         return 0;
2182 }
2183 module_init(dquot_init);
2184
2185 EXPORT_SYMBOL(register_quota_format);
2186 EXPORT_SYMBOL(unregister_quota_format);
2187 EXPORT_SYMBOL(dqstats);
2188 EXPORT_SYMBOL(dq_data_lock);
2189 EXPORT_SYMBOL(vfs_quota_on);
2190 EXPORT_SYMBOL(vfs_quota_on_path);
2191 EXPORT_SYMBOL(vfs_quota_on_mount);
2192 EXPORT_SYMBOL(vfs_quota_off);
2193 EXPORT_SYMBOL(vfs_quota_sync);
2194 EXPORT_SYMBOL(vfs_get_dqinfo);
2195 EXPORT_SYMBOL(vfs_set_dqinfo);
2196 EXPORT_SYMBOL(vfs_get_dqblk);
2197 EXPORT_SYMBOL(vfs_set_dqblk);
2198 EXPORT_SYMBOL(dquot_commit);
2199 EXPORT_SYMBOL(dquot_commit_info);
2200 EXPORT_SYMBOL(dquot_acquire);
2201 EXPORT_SYMBOL(dquot_release);
2202 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
2203 EXPORT_SYMBOL(dquot_initialize);
2204 EXPORT_SYMBOL(dquot_drop);
2205 EXPORT_SYMBOL(vfs_dq_drop);
2206 EXPORT_SYMBOL(dquot_alloc_space);
2207 EXPORT_SYMBOL(dquot_alloc_inode);
2208 EXPORT_SYMBOL(dquot_free_space);
2209 EXPORT_SYMBOL(dquot_free_inode);
2210 EXPORT_SYMBOL(dquot_transfer);
2211 EXPORT_SYMBOL(vfs_dq_transfer);
2212 EXPORT_SYMBOL(vfs_dq_quota_on_remount);