fs/xfs/quota/xfs_trans_dquot.c
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"

STATIC void     xfs_trans_alloc_dqinfo(xfs_trans_t *);

/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        xfs_dq_logitem_t    *lp;

        ASSERT(! XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));
        lp = &dqp->q_logitem;

        /*
         * Get a log_item_desc to point at the new item.
         */
        (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(lp));

        /*
         * Initialize q_transp so we can later determine if this dquot is
         * associated with this transaction.
         */
        dqp->q_transp = tp;
}


/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        xfs_log_item_desc_t     *lidp;

        ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(&dqp->q_logitem));
        ASSERT(lidp != NULL);

        tp->t_flags |= XFS_TRANS_DIRTY;
        lidp->lid_flags |= XFS_LID_DIRTY;
}

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
void
xfs_trans_dup_dqinfo(
        xfs_trans_t     *otp,
        xfs_trans_t     *ntp)
{
        xfs_dqtrx_t     *oq, *nq;
        int             i, j;
        xfs_dqtrx_t     *oqa, *nqa;

        if (!otp->t_dqinfo)
                return;

        xfs_trans_alloc_dqinfo(ntp);
        oqa = otp->t_dqinfo->dqa_usrdquots;
        nqa = ntp->t_dqinfo->dqa_usrdquots;

        /*
         * Because the quota blk reservation is carried forward,
         * it is also necessary to carry forward the DQ_DIRTY flag.
         */
        if (otp->t_flags & XFS_TRANS_DQ_DIRTY)
                ntp->t_flags |= XFS_TRANS_DQ_DIRTY;

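        /*
         * Two passes: user dquots on the first iteration, group dquots on
         * the second (oqa and nqa are switched to the group arrays at the
         * bottom of the loop).
         */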
        for (j = 0; j < 2; j++) {
                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        if (oqa[i].qt_dquot == NULL)
                                break;
                        oq = &oqa[i];
                        nq = &nqa[i];

                        nq->qt_dquot = oq->qt_dquot;
                        nq->qt_bcount_delta = nq->qt_icount_delta = 0;
                        nq->qt_rtbcount_delta = 0;

                        /*
                         * Transfer whatever is left of the reservations.
                         */
                        nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used;
                        oq->qt_blk_res = oq->qt_blk_res_used;

                        nq->qt_rtblk_res = oq->qt_rtblk_res -
                                oq->qt_rtblk_res_used;
                        oq->qt_rtblk_res = oq->qt_rtblk_res_used;

                        nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
                        oq->qt_ino_res = oq->qt_ino_res_used;

                }
                oqa = otp->t_dqinfo->dqa_grpdquots;
                nqa = ntp->t_dqinfo->dqa_grpdquots;
        }
}

/*
 * Wrapper around xfs_trans_mod_dquot() to account for both user and group
 * quotas.
 */
void
xfs_trans_mod_dquot_byino(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        uint            field,
        long            delta)
{
        xfs_mount_t     *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_RUNNING(mp) ||
            !XFS_IS_QUOTA_ON(mp) ||
            ip->i_ino == mp->m_sb.sb_uquotino ||
            ip->i_ino == mp->m_sb.sb_gquotino)
                return;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
        if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot)
                (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
}

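/*
 * Find the slot in the transaction's dquot accounting array (user or group,
 * picked by the dquot's type) that already tracks this dquot, or the first
 * free slot if it is not tracked yet.  Returns NULL if the array is full.
 */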
STATIC xfs_dqtrx_t *
xfs_trans_get_dqtrx(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp)
{
        int             i;
        xfs_dqtrx_t     *qa;

        for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                qa = XFS_QM_DQP_TO_DQACCT(tp, dqp);

                if (qa[i].qt_dquot == NULL ||
                    qa[i].qt_dquot == dqp) {
                        return (&qa[i]);
                }
        }

        return (NULL);
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
        xfs_trans_t     *tp,
        xfs_dquot_t     *dqp,
        uint            field,
        long            delta)
{
        xfs_dqtrx_t     *qtrx;

        ASSERT(tp);
        ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
        qtrx = NULL;

        if (tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);
        /*
         * Find either the first free slot or the slot that belongs
         * to this dquot.
         */
        qtrx = xfs_trans_get_dqtrx(tp, dqp);
        ASSERT(qtrx);
        if (qtrx->qt_dquot == NULL)
                qtrx->qt_dquot = dqp;

        switch (field) {

                /*
                 * regular disk blk reservation
                 */
              case XFS_TRANS_DQ_RES_BLKS:
                qtrx->qt_blk_res += (ulong)delta;
                break;

                /*
                 * inode reservation
                 */
              case XFS_TRANS_DQ_RES_INOS:
                qtrx->qt_ino_res += (ulong)delta;
                break;

                /*
                 * disk blocks used.
                 */
              case XFS_TRANS_DQ_BCOUNT:
                if (qtrx->qt_blk_res && delta > 0) {
                        qtrx->qt_blk_res_used += (ulong)delta;
                        ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used);
                }
                qtrx->qt_bcount_delta += delta;
                break;

              case XFS_TRANS_DQ_DELBCOUNT:
                qtrx->qt_delbcnt_delta += delta;
                break;

                /*
                 * Inode Count
                 */
              case XFS_TRANS_DQ_ICOUNT:
                if (qtrx->qt_ino_res && delta > 0) {
                        qtrx->qt_ino_res_used += (ulong)delta;
                        ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
                }
                qtrx->qt_icount_delta += delta;
                break;

                /*
                 * rtblk reservation
                 */
              case XFS_TRANS_DQ_RES_RTBLKS:
                qtrx->qt_rtblk_res += (ulong)delta;
                break;

                /*
                 * rtblk count
                 */
              case XFS_TRANS_DQ_RTBCOUNT:
                if (qtrx->qt_rtblk_res && delta > 0) {
                        qtrx->qt_rtblk_res_used += (ulong)delta;
                        ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
                }
                qtrx->qt_rtbcount_delta += delta;
                break;

              case XFS_TRANS_DQ_DELRTBCOUNT:
                qtrx->qt_delrtb_delta += delta;
                break;

              default:
                ASSERT(0);
        }
        tp->t_flags |= XFS_TRANS_DQ_DIRTY;
}


/*
 * Given an array of dqtrx structures, lock all the dquots in it and join
 * them to the transaction, provided they have been modified.
 * We know that the highest number of dquots of one type (usr OR grp)
 * involved in a transaction is 2, and that usr and grp combined is 3.
 * So we don't attempt to make this very generic.
 */
STATIC void
xfs_trans_dqlockedjoin(
        xfs_trans_t     *tp,
        xfs_dqtrx_t     *q)
{
        ASSERT(q[0].qt_dquot != NULL);
        if (q[1].qt_dquot == NULL) {
                xfs_dqlock(q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
        } else {
                ASSERT(XFS_QM_TRANS_MAXDQS == 2);
                xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
                xfs_trans_dqjoin(tp, q[0].qt_dquot);
                xfs_trans_dqjoin(tp, q[1].qt_dquot);
        }
}


/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go through all the dquots belonging to this transaction and modify the
 * INCORE dquots to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * The dquots are left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
        xfs_trans_t             *tp)
{
        int                     i, j;
        xfs_dquot_t             *dqp;
        xfs_dqtrx_t             *qtrx, *qa;
        xfs_disk_dquot_t        *d;
        long                    totalbdelta;
        long                    totalrtbdelta;

        if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        ASSERT(tp->t_dqinfo);
        qa = tp->t_dqinfo->dqa_usrdquots;
        for (j = 0; j < 2; j++) {
                if (qa[0].qt_dquot == NULL) {
                        qa = tp->t_dqinfo->dqa_grpdquots;
                        continue;
                }

                /*
                 * Lock all of the dquots and join them to the transaction.
                 */
                xfs_trans_dqlockedjoin(tp, qa);

                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * The array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;

                        ASSERT(XFS_DQ_IS_LOCKED(dqp));
                        ASSERT(XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));

                        /*
                         * adjust the actual number of blocks used
                         */
                        d = &dqp->q_core;

                        /*
                         * The issue here is that sometimes we intentionally
                         * skip the blkquota reservation to be fair to users
                         * (when the amount is small). On the other hand,
                         * delayed allocs do make reservations, but those
                         * happen outside of a transaction, so we have no
                         * idea how much was really reserved.
                         * So, here we've accumulated delayed allocation blks
                         * and non-delay blks. The assumption is that the
                         * delayed ones are always reserved (outside of a
                         * transaction), and the others may or may not have
                         * quota reservations.
                         */
                        totalbdelta = qtrx->qt_bcount_delta +
                                qtrx->qt_delbcnt_delta;
                        totalrtbdelta = qtrx->qt_rtbcount_delta +
                                qtrx->qt_delrtb_delta;
#ifdef QUOTADEBUG
                        if (totalbdelta < 0)
                                ASSERT(be64_to_cpu(d->d_bcount) >=
                                       (xfs_qcnt_t) -totalbdelta);

                        if (totalrtbdelta < 0)
                                ASSERT(be64_to_cpu(d->d_rtbcount) >=
                                       (xfs_qcnt_t) -totalrtbdelta);

                        if (qtrx->qt_icount_delta < 0)
                                ASSERT(be64_to_cpu(d->d_icount) >=
                                       (xfs_qcnt_t) -qtrx->qt_icount_delta);
#endif
                        if (totalbdelta)
                                be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);

                        if (qtrx->qt_icount_delta)
                                be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta);

                        if (totalrtbdelta)
                                be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta);

                        /*
                         * Get any default limits in use.
                         * Start/reset the timer(s) if needed.
                         */
                        if (d->d_id) {
                                xfs_qm_adjust_dqlimits(tp->t_mountp, d);
                                xfs_qm_adjust_dqtimers(tp->t_mountp, d);
                        }

                        dqp->dq_flags |= XFS_DQ_DIRTY;
                        /*
                         * add this to the list of items to get logged
                         */
                        xfs_trans_log_dquot(tp, dqp);
                        /*
                         * Take off what's left of the original reservation.
                         * In case of delayed allocations, there's no
                         * reservation that a transaction structure knows of.
                         */
                        if (qtrx->qt_blk_res != 0) {
                                if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) {
                                        if (qtrx->qt_blk_res >
                                            qtrx->qt_blk_res_used)
                                                dqp->q_res_bcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_blk_res -
                                                         qtrx->qt_blk_res_used);
                                        else
                                                dqp->q_res_bcount -= (xfs_qcnt_t)
                                                        (qtrx->qt_blk_res_used -
                                                         qtrx->qt_blk_res);
                                }
                        } else {
                                /*
                                 * These blks were never reserved, either inside
                                 * a transaction or outside one (in a delayed
                                 * allocation). Also, this isn't always a
                                 * negative number since we sometimes
                                 * deliberately skip quota reservations.
                                 */
                                if (qtrx->qt_bcount_delta) {
                                        dqp->q_res_bcount +=
                                              (xfs_qcnt_t)qtrx->qt_bcount_delta;
                                }
                        }
                        /*
                         * Adjust the RT reservation.
                         */
                        if (qtrx->qt_rtblk_res != 0) {
                                if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) {
                                        if (qtrx->qt_rtblk_res >
                                            qtrx->qt_rtblk_res_used)
                                               dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                                       (qtrx->qt_rtblk_res -
                                                        qtrx->qt_rtblk_res_used);
                                        else
                                               dqp->q_res_rtbcount -= (xfs_qcnt_t)
                                                       (qtrx->qt_rtblk_res_used -
                                                        qtrx->qt_rtblk_res);
                                }
                        } else {
                                if (qtrx->qt_rtbcount_delta)
                                        dqp->q_res_rtbcount +=
                                            (xfs_qcnt_t)qtrx->qt_rtbcount_delta;
                        }

                        /*
                         * Adjust the inode reservation.
                         */
                        if (qtrx->qt_ino_res != 0) {
                                ASSERT(qtrx->qt_ino_res >=
                                       qtrx->qt_ino_res_used);
                                if (qtrx->qt_ino_res > qtrx->qt_ino_res_used)
                                        dqp->q_res_icount -= (xfs_qcnt_t)
                                                (qtrx->qt_ino_res -
                                                 qtrx->qt_ino_res_used);
                        } else {
                                if (qtrx->qt_icount_delta)
                                        dqp->q_res_icount +=
                                            (xfs_qcnt_t)qtrx->qt_icount_delta;
                        }

                        ASSERT(dqp->q_res_bcount >=
                                be64_to_cpu(dqp->q_core.d_bcount));
                        ASSERT(dqp->q_res_icount >=
                                be64_to_cpu(dqp->q_core.d_icount));
                        ASSERT(dqp->q_res_rtbcount >=
                                be64_to_cpu(dqp->q_core.d_rtbcount));
                }
                /*
                 * Do the group quotas next
                 */
                qa = tp->t_dqinfo->dqa_grpdquots;
        }
}

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
        xfs_trans_t             *tp)
{
        int                     i, j;
        xfs_dquot_t             *dqp;
        xfs_dqtrx_t             *qtrx, *qa;
        boolean_t               locked;

        if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
                return;

        qa = tp->t_dqinfo->dqa_usrdquots;

        for (j = 0; j < 2; j++) {
                for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
                        qtrx = &qa[i];
                        /*
                         * We assume that the array of dquots is filled
                         * sequentially, not sparsely.
                         */
                        if ((dqp = qtrx->qt_dquot) == NULL)
                                break;
                        /*
                         * Unreserve the original reservation. We don't care
                         * about the number of blocks used field, or deltas.
                         * Also we don't bother to zero the fields.
                         */
                        locked = B_FALSE;
                        if (qtrx->qt_blk_res) {
                                xfs_dqlock(dqp);
                                locked = B_TRUE;
                                dqp->q_res_bcount -=
                                        (xfs_qcnt_t)qtrx->qt_blk_res;
                        }
                        if (qtrx->qt_ino_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = B_TRUE;
                                }
                                dqp->q_res_icount -=
                                        (xfs_qcnt_t)qtrx->qt_ino_res;
                        }

                        if (qtrx->qt_rtblk_res) {
                                if (!locked) {
                                        xfs_dqlock(dqp);
                                        locked = B_TRUE;
                                }
                                dqp->q_res_rtbcount -=
                                        (xfs_qcnt_t)qtrx->qt_rtblk_res;
                        }
                        if (locked)
                                xfs_dqunlock(dqp);

                }
                qa = tp->t_dqinfo->dqa_grpdquots;
        }
}

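/*
 * Pick the error to return when a quota reservation fails: ENOSPC when the
 * caller asked for it via XFS_QMOPT_ENOSPC (used for project quota), EDQUOT
 * otherwise.
 */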
STATIC int
xfs_quota_error(uint flags)
{
        if (flags & XFS_QMOPT_ENOSPC)
                return ENOSPC;
        return EDQUOT;
}

/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate whether the blk reservation is for RT or regular blocks,
 * and sending in the XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
        xfs_trans_t     *tp,
        xfs_mount_t     *mp,
        xfs_dquot_t     *dqp,
        long            nblks,
        long            ninos,
        uint            flags)
{
        int             error;
        xfs_qcnt_t      hardlimit;
        xfs_qcnt_t      softlimit;
        time_t          timer;
        xfs_qwarncnt_t  warns;
        xfs_qwarncnt_t  warnlimit;
        xfs_qcnt_t      count;
        xfs_qcnt_t      *resbcountp;
        xfs_quotainfo_t *q = mp->m_quotainfo;


        xfs_dqlock(dqp);

        if (flags & XFS_TRANS_DQ_RES_BLKS) {
                hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
                if (!hardlimit)
                        hardlimit = q->qi_bhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
                if (!softlimit)
                        softlimit = q->qi_bsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_btimer);
                warns = be16_to_cpu(dqp->q_core.d_bwarns);
                warnlimit = XFS_QI_BWARNLIMIT(dqp->q_mount);
                resbcountp = &dqp->q_res_bcount;
        } else {
                ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
                hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
                if (!hardlimit)
                        hardlimit = q->qi_rtbhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
                if (!softlimit)
                        softlimit = q->qi_rtbsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
                warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
                warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
                resbcountp = &dqp->q_res_rtbcount;
        }
        error = 0;

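        /*
         * Only enforce limits when the reservation is not forced
         * (XFS_QMOPT_FORCE_RES), the dquot id is non-zero, and enforcement
         * is turned on for this dquot's quota type.
         */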
        if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
            dqp->q_core.d_id &&
            ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
             (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) &&
              (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) {
#ifdef QUOTADEBUG
                cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
                          " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
#endif
                if (nblks > 0) {
                        /*
                         * dquot is locked already. See if we'd go over the
                         * hardlimit or exceed the timelimit if we allocate
                         * nblks.
                         */
                        if (hardlimit > 0ULL &&
                             (hardlimit <= nblks + *resbcountp)) {
                                error = xfs_quota_error(flags);
                                goto error_return;
                        }

                        if (softlimit > 0ULL &&
                             (softlimit <= nblks + *resbcountp)) {
                                if ((timer != 0 && get_seconds() > timer) ||
                                    (warns != 0 && warns >= warnlimit)) {
                                        error = xfs_quota_error(flags);
                                        goto error_return;
                                }
                        }
                }
                if (ninos > 0) {
                        count = be64_to_cpu(dqp->q_core.d_icount);
                        timer = be32_to_cpu(dqp->q_core.d_itimer);
                        warns = be16_to_cpu(dqp->q_core.d_iwarns);
                        warnlimit = XFS_QI_IWARNLIMIT(dqp->q_mount);
                        hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
                        if (!hardlimit)
                                hardlimit = q->qi_ihardlimit;
                        softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
                        if (!softlimit)
                                softlimit = q->qi_isoftlimit;
                        if (hardlimit > 0ULL && count >= hardlimit) {
                                error = xfs_quota_error(flags);
                                goto error_return;
                        } else if (softlimit > 0ULL && count >= softlimit) {
                                if ((timer != 0 && get_seconds() > timer) ||
                                     (warns != 0 && warns >= warnlimit)) {
                                        error = xfs_quota_error(flags);
                                        goto error_return;
                                }
                        }
                }
        }

        /*
         * Change the reservation, but not the actual usage.
         * Note that q_res_bcount = q_core.d_bcount + resv
         */
        (*resbcountp) += (xfs_qcnt_t)nblks;
        if (ninos != 0)
                dqp->q_res_icount += (xfs_qcnt_t)ninos;

        /*
         * note the reservation amt in the trans struct too,
         * so that the transaction knows how much was reserved by
         * it against this particular dquot.
         * We don't do this when we are reserving for a delayed allocation,
         * because we don't have the luxury of a transaction envelope then.
         */
        if (tp) {
                ASSERT(tp->t_dqinfo);
                ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
                if (nblks != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            flags & XFS_QMOPT_RESBLK_MASK,
                                            nblks);
                if (ninos != 0)
                        xfs_trans_mod_dquot(tp, dqp,
                                            XFS_TRANS_DQ_RES_INOS,
                                            ninos);
        }
        ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount));
        ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
        ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));

error_return:
        xfs_dqunlock(dqp);
        return error;
}


/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against both the usr and
 * grp/prj quotas is important, because this is an all-or-nothing operation:
 * if the second reservation fails, the first one is backed out.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *         XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *         XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *         XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
        xfs_trans_t     *tp,
        xfs_mount_t     *mp,
        xfs_dquot_t     *udqp,
        xfs_dquot_t     *gdqp,
        long            nblks,
        long            ninos,
        uint            flags)
{
        int             resvd = 0, error;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        if (tp && tp->t_dqinfo == NULL)
                xfs_trans_alloc_dqinfo(tp);

        ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

        if (udqp) {
                error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
                                        (flags & ~XFS_QMOPT_ENOSPC));
                if (error)
                        return error;
                resvd = 1;
        }

        if (gdqp) {
                error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
                if (error) {
                        /*
                         * can't do it, so backout previous reservation
                         */
                        if (resvd) {
                                flags |= XFS_QMOPT_FORCE_RES;
                                xfs_trans_dqresv(tp, mp, udqp,
                                                 -nblks, -ninos, flags);
                        }
                        return error;
                }
        }

        /*
         * Didn't change anything critical, so no need to log.
         */
        return 0;
}


/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode passed in must already be locked.
 */
int
xfs_trans_reserve_quota_nblks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        long                    nblks,
        long                    ninos,
        uint                    flags)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;
        if (XFS_IS_PQUOTA_ON(mp))
                flags |= XFS_QMOPT_ENOSPC;

        ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
        ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
                                XFS_TRANS_DQ_RES_RTBLKS ||
               (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
                                XFS_TRANS_DQ_RES_BLKS);

        /*
         * Reserve nblks against these dquots, with trans as the mediator.
         */
        return xfs_trans_reserve_quota_bydquots(tp, mp,
                                                ip->i_udquot, ip->i_gdquot,
                                                nblks, ninos, flags);
}

/*
 * This routine is called to allocate a quotaoff log item.
 */
xfs_qoff_logitem_t *
xfs_trans_get_qoff_item(
        xfs_trans_t             *tp,
        xfs_qoff_logitem_t      *startqoff,
        uint                    flags)
{
        xfs_qoff_logitem_t      *q;

        ASSERT(tp != NULL);

        q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
        ASSERT(q != NULL);

        /*
         * Get a log_item_desc to point at the new item.
         */
        (void) xfs_trans_add_item(tp, (xfs_log_item_t*)q);

        return (q);
}


/*
 * This is called to mark the quotaoff logitem as needing
 * to be logged when the transaction is committed.  The logitem must
 * already be associated with the given transaction.
 */
void
xfs_trans_log_quotaoff_item(
        xfs_trans_t             *tp,
        xfs_qoff_logitem_t      *qlp)
{
        xfs_log_item_desc_t     *lidp;

        lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)qlp);
        ASSERT(lidp != NULL);

        tp->t_flags |= XFS_TRANS_DIRTY;
        lidp->lid_flags |= XFS_LID_DIRTY;
}

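/*
 * Allocate the per-transaction dquot accounting structure from the dqtrx
 * zone.
 */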
STATIC void
xfs_trans_alloc_dqinfo(
        xfs_trans_t     *tp)
{
        tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
}

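/*
 * Free the per-transaction dquot accounting structure, if one was allocated.
 */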
void
xfs_trans_free_dqinfo(
        xfs_trans_t     *tp)
{
        if (!tp->t_dqinfo)
                return;
        kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo);
        tp->t_dqinfo = NULL;
}