fs/xfs/quota/xfs_dquot_item.c
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"

/*
 * returns the number of iovecs needed to log the given dquot item.
 */
/* ARGSUSED */
STATIC uint
xfs_qm_dquot_logitem_size(
        xfs_dq_logitem_t        *logitem)
{
        /*
         * we need only two iovecs, one for the format, one for the real thing
         */
        return (2);
}

/*
 * fills in the vector of log iovecs for the given dquot log item.
 */
STATIC void
xfs_qm_dquot_logitem_format(
        xfs_dq_logitem_t        *logitem,
        xfs_log_iovec_t         *logvec)
{
        ASSERT(logitem);
        ASSERT(logitem->qli_dquot);

        logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
        logvec->i_len  = sizeof(xfs_dq_logformat_t);
        XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT);
        logvec++;
        logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
        logvec->i_len  = sizeof(xfs_disk_dquot_t);
        XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT);

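        /*
         * qlf_size records in the on-disk format header the number of log
         * regions (two) that make up this item; keep it in sync with the
         * in-core log item descriptor's lid_size.
         */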
        ASSERT(2 == logitem->qli_item.li_desc->lid_size);
        logitem->qli_format.qlf_size = 2;

}

/*
 * Increment the pin count of the given dquot.
 * This value is protected by the pinlock spinlock in the quotainfo structure.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
        xfs_dq_logitem_t *logitem)
{
        xfs_dquot_t *dqp;

        dqp = logitem->qli_dquot;
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
        dqp->q_pincount++;
        spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
}

/*
 * Decrement the pin count of the given dquot, and wake up
 * anyone in xfs_qm_dqunpin_wait() if the count goes to 0.  The
 * dquot must have been previously pinned with a call to
 * xfs_qm_dquot_logitem_pin().
 */
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin(
        xfs_dq_logitem_t *logitem,
        int               stale)
{
        xfs_dquot_t *dqp;

        dqp = logitem->qli_dquot;
        ASSERT(dqp->q_pincount > 0);
        spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
        dqp->q_pincount--;
        if (dqp->q_pincount == 0) {
                sv_broadcast(&dqp->q_pinwait);
        }
        spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
}

/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_unpin_remove(
        xfs_dq_logitem_t *logitem,
        xfs_trans_t      *tp)
{
        xfs_qm_dquot_logitem_unpin(logitem, 0);
}

/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously. This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 */
STATIC void
xfs_qm_dquot_logitem_push(
        xfs_dq_logitem_t        *logitem)
{
        xfs_dquot_t     *dqp;

        dqp = logitem->qli_dquot;

        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp));

        /*
         * Since we were able to lock the dquot's flush lock and
         * we found it on the AIL, the dquot must be dirty.  This
         * is because the dquot is removed from the AIL while still
         * holding the flush lock in xfs_dqflush_done().  Thus, if
         * we found it in the AIL and were able to obtain the flush
         * lock without sleeping, then there must not have been
         * anyone in the process of flushing the dquot.
         */
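        /*
         * XFS_B_DELWRI asks xfs_qm_dqflush() to queue the dquot's buffer as
         * a delayed write rather than issuing the I/O synchronously here.
         */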
        xfs_qm_dqflush(dqp, XFS_B_DELWRI);
        xfs_dqunlock(dqp);
}

/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
        xfs_dq_logitem_t        *l,
        xfs_lsn_t               lsn)
{
        /*
         * We always re-log the entire dquot when it becomes dirty,
         * so, the latest copy _is_ the only one that matters.
         */
        return (lsn);
}


/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 */
void
xfs_qm_dqunpin_wait(
        xfs_dquot_t     *dqp)
{
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
        if (dqp->q_pincount == 0) {
                return;
        }

        /*
         * Give the log a push so we don't wait here too long.
         */
        xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
        spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
        if (dqp->q_pincount == 0) {
                spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock));
                return;
        }
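        /*
         * sv_wait() drops qi_pinlock and sleeps until
         * xfs_qm_dquot_logitem_unpin() drives the pin count to zero and
         * broadcasts q_pinwait.
         */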
        sv_wait(&(dqp->q_pinwait), PINOD,
                &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s);
}

/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and the AIL lock is
 * a spinlock.
 */
STATIC void
xfs_qm_dquot_logitem_pushbuf(
        xfs_dq_logitem_t    *qip)
{
        xfs_dquot_t     *dqp;
        xfs_mount_t     *mp;
        xfs_buf_t       *bp;
        uint            dopush;

        dqp = qip->qli_dquot;
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        /*
         * The qli_pushbuf_flag keeps others from
         * trying to duplicate our effort.
         */
        ASSERT(qip->qli_pushbuf_flag != 0);
        ASSERT(qip->qli_push_owner == current_pid());

        /*
         * If the flush lock isn't locked anymore, chances are that the
         * dquot flush completed and the dquot was taken off the AIL.
         * So, just get out.
         */
        if (!issemalocked(&(dqp->q_flock))  ||
            ((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
                qip->qli_pushbuf_flag = 0;
                xfs_dqunlock(dqp);
                return;
        }
        mp = dqp->q_mount;
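        /*
         * Look up the dquot's backing buffer without blocking;
         * XFS_INCORE_TRYLOCK makes xfs_incore() return NULL rather than
         * sleep if the buffer is not cached or cannot be locked right away.
         */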
        bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
                    XFS_QI_DQCHUNKLEN(mp),
                    XFS_INCORE_TRYLOCK);
        if (bp != NULL) {
                if (XFS_BUF_ISDELAYWRITE(bp)) {
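                        /*
                         * Re-check that the dquot is still flush locked
                         * (i.e. dirty) and still on the AIL before deciding
                         * to push the buffer; both can change while we do
                         * the buffer lookup, since the flush can complete
                         * without taking the dquot lock.
                         */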
                        dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
                                  issemalocked(&(dqp->q_flock)));
                        qip->qli_pushbuf_flag = 0;
                        xfs_dqunlock(dqp);

                        if (XFS_BUF_ISPINNED(bp)) {
                                xfs_log_force(mp, (xfs_lsn_t)0,
                                              XFS_LOG_FORCE);
                        }
                        if (dopush) {
#ifdef XFSRACEDEBUG
                                delay_for_intr();
                                delay(300);
#endif
                                xfs_bawrite(mp, bp);
                        } else {
                                xfs_buf_relse(bp);
                        }
                } else {
                        qip->qli_pushbuf_flag = 0;
                        xfs_dqunlock(dqp);
                        xfs_buf_relse(bp);
                }
                return;
        }

        qip->qli_pushbuf_flag = 0;
        xfs_dqunlock(dqp);
}

/*
 * This is called to attempt to lock the dquot associated with this
 * dquot log item.  Don't sleep on the dquot lock or the flush lock.
 * If the flush lock is already held, indicating that the dquot has
 * been or is in the process of being flushed, then see if we can
 * find the dquot's buffer in the buffer cache without sleeping.  If
 * we can and it is marked delayed write, then we want to send it out.
 * We delay doing so until the push routine, though, to avoid sleeping
 * in any device strategy routines.
 */
STATIC uint
xfs_qm_dquot_logitem_trylock(
        xfs_dq_logitem_t        *qip)
{
        xfs_dquot_t             *dqp;
        uint                    retval;

        dqp = qip->qli_dquot;
        if (dqp->q_pincount > 0)
                return (XFS_ITEM_PINNED);

        if (! xfs_qm_dqlock_nowait(dqp))
                return (XFS_ITEM_LOCKED);

        retval = XFS_ITEM_SUCCESS;
        if (! xfs_qm_dqflock_nowait(dqp)) {
                /*
                 * The dquot is already being flushed.  It may have been
                 * flushed delayed write, however, and we don't want to
                 * get stuck waiting for that to complete.  So, we want to check
                 * to see if we can lock the dquot's buffer without sleeping.
                 * If we can and it is marked for delayed write, then we
                 * hold it and send it out from the push routine.  We don't
                 * want to do that now since we might sleep in the device
                 * strategy routine.  We also don't want to grab the buffer lock
                 * here because we'd like not to call into the buffer cache
                 * while holding the AIL lock.
                 * Make sure to only return PUSHBUF if we set pushbuf_flag
                 * ourselves.  If someone else is doing it then we don't
                 * want to go to the push routine and duplicate their efforts.
                 */
                if (qip->qli_pushbuf_flag == 0) {
                        qip->qli_pushbuf_flag = 1;
                        ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
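                        /*
                         * Record which task set qli_pushbuf_flag so that
                         * xfs_qm_dquot_logitem_pushbuf() can assert it is
                         * run by that same task (the assert is compiled in
                         * debug builds only).
                         */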
#ifdef DEBUG
                        qip->qli_push_owner = current_pid();
#endif
                        /*
                         * The dquot is left locked.
                         */
                        retval = XFS_ITEM_PUSHBUF;
                } else {
                        retval = XFS_ITEM_FLUSHING;
                        xfs_dqunlock_nonotify(dqp);
                }
        }

        ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
        return (retval);
}


/*
 * Unlock the dquot associated with the log item.
 * Clear the fields of the dquot and dquot log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the dquot.
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
        xfs_dq_logitem_t    *ql)
{
        xfs_dquot_t     *dqp;

        ASSERT(ql != NULL);
        dqp = ql->qli_dquot;
        ASSERT(XFS_DQ_IS_LOCKED(dqp));

        /*
         * Clear the transaction pointer in the dquot
         */
        dqp->q_transp = NULL;

        /*
         * dquots are never 'held' from getting unlocked at the end of
         * a transaction.  Their locking and unlocking is hidden inside the
         * transaction layer, within trans_commit. Hence, no LI_HOLD flag
         * for the logitem.
         */
        xfs_dqunlock(dqp);
}


/*
 * This needs to stamp an lsn into the dquot, I think.
 * RPCs that look at user dquots would then have to
 * push on the dependency recorded in the dquot.
 */
/* ARGSUSED */
STATIC void
xfs_qm_dquot_logitem_committing(
        xfs_dq_logitem_t        *l,
        xfs_lsn_t               lsn)
{
        return;
}


/*
 * This is the ops vector for dquots
 */
static struct xfs_item_ops xfs_dquot_item_ops = {
        .iop_size       = (uint(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_size,
        .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
                                        xfs_qm_dquot_logitem_format,
        .iop_pin        = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_pin,
        .iop_unpin      = (void(*)(xfs_log_item_t*, int))
                                        xfs_qm_dquot_logitem_unpin,
        .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
                                        xfs_qm_dquot_logitem_unpin_remove,
        .iop_trylock    = (uint(*)(xfs_log_item_t*))
                                        xfs_qm_dquot_logitem_trylock,
        .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_unlock,
        .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_dquot_logitem_committed,
        .iop_push       = (void(*)(xfs_log_item_t*))xfs_qm_dquot_logitem_push,
        .iop_pushbuf    = (void(*)(xfs_log_item_t*))
                                        xfs_qm_dquot_logitem_pushbuf,
        .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_dquot_logitem_committing
};

/*
 * Initialize the dquot log item for a newly allocated dquot.
 * The dquot isn't locked at this point, but it isn't on any of the lists
 * either, so we don't care.
 */
void
xfs_qm_dquot_logitem_init(
        struct xfs_dquot *dqp)
{
        xfs_dq_logitem_t  *lp;
        lp = &dqp->q_logitem;

        lp->qli_item.li_type = XFS_LI_DQUOT;
        lp->qli_item.li_ops = &xfs_dquot_item_ops;
        lp->qli_item.li_mountp = dqp->q_mount;
        lp->qli_dquot = dqp;
        lp->qli_format.qlf_type = XFS_LI_DQUOT;
        lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
        lp->qli_format.qlf_blkno = dqp->q_blkno;
        lp->qli_format.qlf_len = 1;
        /*
         * This is just the offset of this dquot within its buffer
         * (which is currently 1 FSB and probably won't change).
         * Hence 32 bits for this offset should be just fine.
         * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
         * here, and recompute it at recovery time.
         */
        lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
}

/*------------------  QUOTAOFF LOG ITEMS  -------------------*/

/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for a quotaoff item.  It just logs the
 * quotaoff_log_format structure.
 */
/*ARGSUSED*/
STATIC uint
xfs_qm_qoff_logitem_size(xfs_qoff_logitem_t *qf)
{
        return (1);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given quotaoff log item. We use only 1 iovec, and we point that
 * at the quotaoff_log_format structure embedded in the quotaoff item.
 * We also assert that the embedded format structure really is of the
 * quotaoff type.
 */
STATIC void
xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t   *qf,
                           xfs_log_iovec_t      *log_vector)
{
        ASSERT(qf->qql_format.qf_type == XFS_LI_QUOTAOFF);

        log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
        log_vector->i_len = sizeof(xfs_qoff_logitem_t);
        XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_QUOTAOFF);
        qf->qql_format.qf_size = 1;
}


/*
 * Pinning has no meaning for a quotaoff item, so just return.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_pin(xfs_qoff_logitem_t *qf)
{
        return;
}


/*
 * Since pinning has no meaning for a quotaoff item, unpinning does
 * not either.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unpin(xfs_qoff_logitem_t *qf, int stale)
{
        return;
}

/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unpin_remove(xfs_qoff_logitem_t *qf, xfs_trans_t *tp)
{
        return;
}

/*
 * Quotaoff items have no locking of their own; return XFS_ITEM_LOCKED
 * so the AIL does not try to push or flush this item.
 */
/*ARGSUSED*/
STATIC uint
xfs_qm_qoff_logitem_trylock(xfs_qoff_logitem_t *qf)
{
        return XFS_ITEM_LOCKED;
}

/*
 * Quotaoff items have no locking, so there is nothing for us to
 * unlock here.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_unlock(xfs_qoff_logitem_t *qf)
{
        return;
}

/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(xfs_qoff_logitem_t *qf, xfs_lsn_t lsn)
{
        return (lsn);
}

/*
 * There isn't much you can do to push on a quotaoff item.  It is simply
 * stuck waiting for the log to be flushed to disk.
 */
/*ARGSUSED*/
STATIC void
xfs_qm_qoff_logitem_push(xfs_qoff_logitem_t *qf)
{
        return;
}


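/*
 * This is called once the quotaoff-end log item has been committed to the
 * on-disk log.  At that point both the start and the end records are on
 * disk, so the quotaoff is permanent: pull the paired start item off the
 * AIL and free both log items.
 */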
/*ARGSUSED*/
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
        xfs_qoff_logitem_t *qfe,
        xfs_lsn_t lsn)
{
        xfs_qoff_logitem_t      *qfs;

        qfs = qfe->qql_start_lip;
        spin_lock(&qfs->qql_item.li_mountp->m_ail_lock);
        /*
         * Delete the qoff-start logitem from the AIL.
         * xfs_trans_delete_ail() drops the AIL lock.
         */
        xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs);
        kmem_free(qfs, sizeof(xfs_qoff_logitem_t));
        kmem_free(qfe, sizeof(xfs_qoff_logitem_t));
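        /*
         * Returning (xfs_lsn_t)-1 tells the transaction commit code that
         * this item has been freed and must not be inserted into the AIL.
         */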
        return (xfs_lsn_t)-1;
}

/*
 * XXX rcc - don't know quite what to do with this.  I think we can
 * just ignore it.  The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off, in which
 * case we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen?  Also, do we need different routines for
 * quotaoff start and quotaoff end?  I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quota off
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable).  If we do something else,
 * then maybe we don't need two.
 */
/* ARGSUSED */
STATIC void
xfs_qm_qoff_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
{
        return;
}

/* ARGSUSED */
STATIC void
xfs_qm_qoffend_logitem_committing(xfs_qoff_logitem_t *qip, xfs_lsn_t commit_lsn)
{
        return;
}

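/*
 * This is the ops vector used by quotaoff-end log items.
 */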
static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
        .iop_size       = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
        .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
                                        xfs_qm_qoff_logitem_format,
        .iop_pin        = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
        .iop_unpin      = (void(*)(xfs_log_item_t*, int))
                                        xfs_qm_qoff_logitem_unpin,
        .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
                                        xfs_qm_qoff_logitem_unpin_remove,
        .iop_trylock    = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
        .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
        .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_qoffend_logitem_committed,
        .iop_push       = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
        .iop_pushbuf    = NULL,
        .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_qoffend_logitem_committing
};

/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
        .iop_size       = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_size,
        .iop_format     = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
                                        xfs_qm_qoff_logitem_format,
        .iop_pin        = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_pin,
        .iop_unpin      = (void(*)(xfs_log_item_t*, int))
                                        xfs_qm_qoff_logitem_unpin,
        .iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t*))
                                        xfs_qm_qoff_logitem_unpin_remove,
        .iop_trylock    = (uint(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_trylock,
        .iop_unlock     = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_unlock,
        .iop_committed  = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_qoff_logitem_committed,
        .iop_push       = (void(*)(xfs_log_item_t*))xfs_qm_qoff_logitem_push,
        .iop_pushbuf    = NULL,
        .iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
                                        xfs_qm_qoff_logitem_committing
};

/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
 */
xfs_qoff_logitem_t *
xfs_qm_qoff_logitem_init(
        struct xfs_mount *mp,
        xfs_qoff_logitem_t *start,
        uint flags)
{
        xfs_qoff_logitem_t      *qf;

        qf = (xfs_qoff_logitem_t *)
                kmem_zalloc(sizeof(xfs_qoff_logitem_t), KM_SLEEP);

        qf->qql_item.li_type = XFS_LI_QUOTAOFF;
        if (start)
                qf->qql_item.li_ops = &xfs_qm_qoffend_logitem_ops;
        else
                qf->qql_item.li_ops = &xfs_qm_qoff_logitem_ops;
        qf->qql_item.li_mountp = mp;
        qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
        qf->qql_format.qf_flags = flags;
        qf->qql_start_lip = start;
        return (qf);
}