Merge branch 'master'
[linux-2.6] / fs / xfs / xfs_log.c
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir.h"
28 #include "xfs_dir2.h"
29 #include "xfs_dmapi.h"
30 #include "xfs_mount.h"
31 #include "xfs_error.h"
32 #include "xfs_log_priv.h"
33 #include "xfs_buf_item.h"
34 #include "xfs_bmap_btree.h"
35 #include "xfs_alloc_btree.h"
36 #include "xfs_ialloc_btree.h"
37 #include "xfs_log_recover.h"
38 #include "xfs_trans_priv.h"
39 #include "xfs_dir_sf.h"
40 #include "xfs_dir2_sf.h"
41 #include "xfs_attr_sf.h"
42 #include "xfs_dinode.h"
43 #include "xfs_inode.h"
44 #include "xfs_rw.h"
45
46
47 #define xlog_write_adv_cnt(ptr, len, off, bytes) \
48         { (ptr) += (bytes); \
49           (len) -= (bytes); \
50           (off) += (bytes);}
51
52 /* Local miscellaneous function prototypes */
53 STATIC int       xlog_bdstrat_cb(struct xfs_buf *);
54 STATIC int       xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket,
55                                     xlog_in_core_t **, xfs_lsn_t *);
56 STATIC xlog_t *  xlog_alloc_log(xfs_mount_t     *mp,
57                                 xfs_buftarg_t   *log_target,
58                                 xfs_daddr_t     blk_offset,
59                                 int             num_bblks);
60 STATIC int       xlog_space_left(xlog_t *log, int cycle, int bytes);
61 STATIC int       xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
62 STATIC void      xlog_unalloc_log(xlog_t *log);
63 STATIC int       xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[],
64                             int nentries, xfs_log_ticket_t tic,
65                             xfs_lsn_t *start_lsn,
66                             xlog_in_core_t **commit_iclog,
67                             uint flags);
68
69 /* local state machine functions */
70 STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
71 STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog);
72 STATIC int  xlog_state_get_iclog_space(xlog_t           *log,
73                                        int              len,
74                                        xlog_in_core_t   **iclog,
75                                        xlog_ticket_t    *ticket,
76                                        int              *continued_write,
77                                        int              *logoffsetp);
78 STATIC void xlog_state_put_ticket(xlog_t        *log,
79                                   xlog_ticket_t *tic);
80 STATIC int  xlog_state_release_iclog(xlog_t             *log,
81                                      xlog_in_core_t     *iclog);
82 STATIC void xlog_state_switch_iclogs(xlog_t             *log,
83                                      xlog_in_core_t *iclog,
84                                      int                eventual_size);
85 STATIC int  xlog_state_sync(xlog_t                      *log,
86                             xfs_lsn_t                   lsn,
87                             uint                        flags,
88                             int                         *log_flushed);
89 STATIC int  xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed);
90 STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
91
92 /* local functions to manipulate grant head */
93 STATIC int  xlog_grant_log_space(xlog_t         *log,
94                                  xlog_ticket_t  *xtic);
95 STATIC void xlog_grant_push_ail(xfs_mount_t     *mp,
96                                 int             need_bytes);
97 STATIC void xlog_regrant_reserve_log_space(xlog_t        *log,
98                                            xlog_ticket_t *ticket);
99 STATIC int xlog_regrant_write_log_space(xlog_t          *log,
100                                          xlog_ticket_t  *ticket);
101 STATIC void xlog_ungrant_log_space(xlog_t        *log,
102                                    xlog_ticket_t *ticket);
103
104
105 /* local ticket functions */
106 STATIC void             xlog_state_ticket_alloc(xlog_t *log);
107 STATIC xlog_ticket_t    *xlog_ticket_get(xlog_t *log,
108                                          int    unit_bytes,
109                                          int    count,
110                                          char   clientid,
111                                          uint   flags);
112 STATIC void             xlog_ticket_put(xlog_t *log, xlog_ticket_t *ticket);
113
114 #if defined(DEBUG)
115 STATIC void     xlog_verify_dest_ptr(xlog_t *log, __psint_t ptr);
116 STATIC void     xlog_verify_grant_head(xlog_t *log, int equals);
117 STATIC void     xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
118                                   int count, boolean_t syncing);
119 STATIC void     xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
120                                      xfs_lsn_t tail_lsn);
121 #else
122 #define xlog_verify_dest_ptr(a,b)
123 #define xlog_verify_grant_head(a,b)
124 #define xlog_verify_iclog(a,b,c,d)
125 #define xlog_verify_tail_lsn(a,b,c)
126 #endif
127
128 STATIC int      xlog_iclogs_empty(xlog_t *log);
129
130 #if defined(XFS_LOG_TRACE)
131 void
132 xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
133 {
134         unsigned long cnts;
135
136         if (!log->l_grant_trace) {
137                 log->l_grant_trace = ktrace_alloc(2048, KM_NOSLEEP);
138                 if (!log->l_grant_trace)
139                         return;
140         }
141         /* ticket counts are 1 byte each */
142         cnts = ((unsigned long)tic->t_ocnt) | ((unsigned long)tic->t_cnt) << 8;
143
144         ktrace_enter(log->l_grant_trace,
145                      (void *)tic,
146                      (void *)log->l_reserve_headq,
147                      (void *)log->l_write_headq,
148                      (void *)((unsigned long)log->l_grant_reserve_cycle),
149                      (void *)((unsigned long)log->l_grant_reserve_bytes),
150                      (void *)((unsigned long)log->l_grant_write_cycle),
151                      (void *)((unsigned long)log->l_grant_write_bytes),
152                      (void *)((unsigned long)log->l_curr_cycle),
153                      (void *)((unsigned long)log->l_curr_block),
154                      (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)),
155                      (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)),
156                      (void *)string,
157                      (void *)((unsigned long)tic->t_trans_type),
158                      (void *)cnts,
159                      (void *)((unsigned long)tic->t_curr_res),
160                      (void *)((unsigned long)tic->t_unit_res));
161 }
162
163 void
164 xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
165 {
166         if (!iclog->ic_trace)
167                 iclog->ic_trace = ktrace_alloc(256, KM_SLEEP);
168         ktrace_enter(iclog->ic_trace,
169                      (void *)((unsigned long)state),
170                      (void *)((unsigned long)current_pid()),
171                      (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
172                      (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
173                      (void *)NULL, (void *)NULL, (void *)NULL, (void *)NULL,
174                      (void *)NULL, (void *)NULL);
175 }
176 #else
177 #define xlog_trace_loggrant(log,tic,string)
178 #define xlog_trace_iclog(iclog,state)
179 #endif /* XFS_LOG_TRACE */
180
181 /*
182  * NOTES:
183  *
184  *      1. currblock field gets updated at startup and after in-core logs
185  *              marked as with WANT_SYNC.
186  */
187
188 /*
189  * This routine is called when a user of a log manager ticket is done with
190  * the reservation.  If the ticket was ever used, then a commit record for
191  * the associated transaction is written out as a log operation header with
192  * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
193  * a given ticket.  If the ticket was one with a permanent reservation, then
194  * a few operations are done differently.  Permanent reservation tickets by
195  * default don't release the reservation.  They just commit the current
196  * transaction with the belief that the reservation is still needed.  A flag
197  * must be passed in before permanent reservations are actually released.
198  * When these type of tickets are not released, they need to be set into
199  * the inited state again.  By doing this, a start record will be written
200  * out when the next write occurs.
201  */
202 xfs_lsn_t
203 xfs_log_done(xfs_mount_t        *mp,
204              xfs_log_ticket_t   xtic,
205              void               **iclog,
206              uint               flags)
207 {
208         xlog_t          *log    = mp->m_log;
209         xlog_ticket_t   *ticket = (xfs_log_ticket_t) xtic;
210         xfs_lsn_t       lsn     = 0;
211
212         if (XLOG_FORCED_SHUTDOWN(log) ||
213             /*
214              * If nothing was ever written, don't write out commit record.
215              * If we get an error, just continue and give back the log ticket.
216              */
217             (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
218              (xlog_commit_record(mp, ticket,
219                                  (xlog_in_core_t **)iclog, &lsn)))) {
220                 lsn = (xfs_lsn_t) -1;
221                 if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
222                         flags |= XFS_LOG_REL_PERM_RESERV;
223                 }
224         }
225
226
227         if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
228             (flags & XFS_LOG_REL_PERM_RESERV)) {
229                 /*
230                  * Release ticket if not permanent reservation or a specifc
231                  * request has been made to release a permanent reservation.
232                  */
233                 xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
234                 xlog_ungrant_log_space(log, ticket);
235                 xlog_state_put_ticket(log, ticket);
236         } else {
237                 xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
238                 xlog_regrant_reserve_log_space(log, ticket);
239         }
240
241         /* If this ticket was a permanent reservation and we aren't
242          * trying to release it, reset the inited flags; so next time
243          * we write, a start record will be written out.
244          */
245         if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) &&
246             (flags & XFS_LOG_REL_PERM_RESERV) == 0)
247                 ticket->t_flags |= XLOG_TIC_INITED;
248
249         return lsn;
250 }       /* xfs_log_done */
251
252
253 /*
254  * Force the in-core log to disk.  If flags == XFS_LOG_SYNC,
255  *      the force is done synchronously.
256  *
257  * Asynchronous forces are implemented by setting the WANT_SYNC
258  * bit in the appropriate in-core log and then returning.
259  *
260  * Synchronous forces are implemented with a semaphore.  All callers
261  * to force a given lsn to disk will wait on a semaphore attached to the
262  * specific in-core log.  When given in-core log finally completes its
263  * write to disk, that thread will wake up all threads waiting on the
264  * semaphore.
265  */
266 int
267 _xfs_log_force(
268         xfs_mount_t     *mp,
269         xfs_lsn_t       lsn,
270         uint            flags,
271         int             *log_flushed)
272 {
273         xlog_t          *log = mp->m_log;
274         int             dummy;
275
276         if (!log_flushed)
277                 log_flushed = &dummy;
278
279         ASSERT(flags & XFS_LOG_FORCE);
280
281         XFS_STATS_INC(xs_log_force);
282
283         if (log->l_flags & XLOG_IO_ERROR)
284                 return XFS_ERROR(EIO);
285         if (lsn == 0)
286                 return xlog_state_sync_all(log, flags, log_flushed);
287         else
288                 return xlog_state_sync(log, lsn, flags, log_flushed);
289 }       /* xfs_log_force */
290
291 /*
292  * Attaches a new iclog I/O completion callback routine during
293  * transaction commit.  If the log is in error state, a non-zero
294  * return code is handed back and the caller is responsible for
295  * executing the callback at an appropriate time.
296  */
297 int
298 xfs_log_notify(xfs_mount_t        *mp,          /* mount of partition */
299                void               *iclog_hndl,  /* iclog to hang callback off */
300                xfs_log_callback_t *cb)
301 {
302         xlog_t *log = mp->m_log;
303         xlog_in_core_t    *iclog = (xlog_in_core_t *)iclog_hndl;
304         int     abortflg, spl;
305
306         cb->cb_next = NULL;
307         spl = LOG_LOCK(log);
308         abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
309         if (!abortflg) {
310                 ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
311                               (iclog->ic_state == XLOG_STATE_WANT_SYNC));
312                 cb->cb_next = NULL;
313                 *(iclog->ic_callback_tail) = cb;
314                 iclog->ic_callback_tail = &(cb->cb_next);
315         }
316         LOG_UNLOCK(log, spl);
317         return abortflg;
318 }       /* xfs_log_notify */
319
320 int
321 xfs_log_release_iclog(xfs_mount_t *mp,
322                       void        *iclog_hndl)
323 {
324         xlog_t *log = mp->m_log;
325         xlog_in_core_t    *iclog = (xlog_in_core_t *)iclog_hndl;
326
327         if (xlog_state_release_iclog(log, iclog)) {
328                 xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
329                 return(EIO);
330         }
331
332         return 0;
333 }
334
335 /*
336  *  1. Reserve an amount of on-disk log space and return a ticket corresponding
337  *      to the reservation.
338  *  2. Potentially, push buffers at tail of log to disk.
339  *
340  * Each reservation is going to reserve extra space for a log record header.
341  * When writes happen to the on-disk log, we don't subtract the length of the
342  * log record header from any reservation.  By wasting space in each
343  * reservation, we prevent over allocation problems.
344  */
345 int
346 xfs_log_reserve(xfs_mount_t      *mp,
347                 int              unit_bytes,
348                 int              cnt,
349                 xfs_log_ticket_t *ticket,
350                 __uint8_t        client,
351                 uint             flags,
352                 uint             t_type)
353 {
354         xlog_t          *log = mp->m_log;
355         xlog_ticket_t   *internal_ticket;
356         int             retval = 0;
357
358         ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);
359         ASSERT((flags & XFS_LOG_NOSLEEP) == 0);
360
361         if (XLOG_FORCED_SHUTDOWN(log))
362                 return XFS_ERROR(EIO);
363
364         XFS_STATS_INC(xs_try_logspace);
365
366         if (*ticket != NULL) {
367                 ASSERT(flags & XFS_LOG_PERM_RESERV);
368                 internal_ticket = (xlog_ticket_t *)*ticket;
369                 xlog_trace_loggrant(log, internal_ticket, "xfs_log_reserve: existing ticket (permanent trans)");
370                 xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
371                 retval = xlog_regrant_write_log_space(log, internal_ticket);
372         } else {
373                 /* may sleep if need to allocate more tickets */
374                 internal_ticket = xlog_ticket_get(log, unit_bytes, cnt,
375                                                   client, flags);
376                 internal_ticket->t_trans_type = t_type;
377                 *ticket = internal_ticket;
378                 xlog_trace_loggrant(log, internal_ticket, 
379                         (internal_ticket->t_flags & XLOG_TIC_PERM_RESERV) ?
380                         "xfs_log_reserve: create new ticket (permanent trans)" :
381                         "xfs_log_reserve: create new ticket");
382                 xlog_grant_push_ail(mp,
383                                     (internal_ticket->t_unit_res *
384                                      internal_ticket->t_cnt));
385                 retval = xlog_grant_log_space(log, internal_ticket);
386         }
387
388         return retval;
389 }       /* xfs_log_reserve */
390
391
392 /*
393  * Mount a log filesystem
394  *
395  * mp           - ubiquitous xfs mount point structure
396  * log_target   - buftarg of on-disk log device
397  * blk_offset   - Start block # where block size is 512 bytes (BBSIZE)
398  * num_bblocks  - Number of BBSIZE blocks in on-disk log
399  *
400  * Return error or zero.
401  */
402 int
403 xfs_log_mount(xfs_mount_t       *mp,
404               xfs_buftarg_t     *log_target,
405               xfs_daddr_t       blk_offset,
406               int               num_bblks)
407 {
408         if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
409                 cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
410         else {
411                 cmn_err(CE_NOTE,
412                         "!Mounting filesystem \"%s\" in no-recovery mode.  Filesystem will be inconsistent.",
413                         mp->m_fsname);
414                 ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY);
415         }
416
417         mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
418
419         /*
420          * skip log recovery on a norecovery mount.  pretend it all
421          * just worked.
422          */
423         if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
424                 int     error;
425                 vfs_t   *vfsp = XFS_MTOVFS(mp);
426                 int     readonly = (vfsp->vfs_flag & VFS_RDONLY);
427
428                 if (readonly)
429                         vfsp->vfs_flag &= ~VFS_RDONLY;
430
431                 error = xlog_recover(mp->m_log, readonly);
432
433                 if (readonly)
434                         vfsp->vfs_flag |= VFS_RDONLY;
435                 if (error) {
436                         cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
437                         xlog_unalloc_log(mp->m_log);
438                         return error;
439                 }
440         }
441
442         /* Normal transactions can now occur */
443         mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
444
445         /* End mounting message in xfs_log_mount_finish */
446         return 0;
447 }       /* xfs_log_mount */
448
449 /*
450  * Finish the recovery of the file system.  This is separate from
451  * the xfs_log_mount() call, because it depends on the code in
452  * xfs_mountfs() to read in the root and real-time bitmap inodes
453  * between calling xfs_log_mount() and here.
454  *
455  * mp           - ubiquitous xfs mount point structure
456  */
457 int
458 xfs_log_mount_finish(xfs_mount_t *mp, int mfsi_flags)
459 {
460         int     error;
461
462         if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
463                 error = xlog_recover_finish(mp->m_log, mfsi_flags);
464         else {
465                 error = 0;
466                 ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY);
467         }
468
469         return error;
470 }
471
472 /*
473  * Unmount processing for the log.
474  */
475 int
476 xfs_log_unmount(xfs_mount_t *mp)
477 {
478         int             error;
479
480         error = xfs_log_unmount_write(mp);
481         xfs_log_unmount_dealloc(mp);
482         return (error);
483 }
484
485 /*
486  * Final log writes as part of unmount.
487  *
488  * Mark the filesystem clean as unmount happens.  Note that during relocation
489  * this routine needs to be executed as part of source-bag while the
490  * deallocation must not be done until source-end.
491  */
492
493 /*
494  * Unmount record used to have a string "Unmount filesystem--" in the
495  * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
496  * We just write the magic number now since that particular field isn't
497  * currently architecture converted and "nUmount" is a bit foo.
498  * As far as I know, there weren't any dependencies on the old behaviour.
499  */
500
501 int
502 xfs_log_unmount_write(xfs_mount_t *mp)
503 {
504         xlog_t           *log = mp->m_log;
505         xlog_in_core_t   *iclog;
506 #ifdef DEBUG
507         xlog_in_core_t   *first_iclog;
508 #endif
509         xfs_log_iovec_t  reg[1];
510         xfs_log_ticket_t tic = NULL;
511         xfs_lsn_t        lsn;
512         int              error;
513         SPLDECL(s);
514
515         /* the data section must be 32 bit size aligned */
516         struct {
517             __uint16_t magic;
518             __uint16_t pad1;
519             __uint32_t pad2; /* may as well make it 64 bits */
520         } magic = { XLOG_UNMOUNT_TYPE, 0, 0 };
521
522         /*
523          * Don't write out unmount record on read-only mounts.
524          * Or, if we are doing a forced umount (typically because of IO errors).
525          */
526         if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
527                 return 0;
528
529         xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC);
530
531 #ifdef DEBUG
532         first_iclog = iclog = log->l_iclog;
533         do {
534                 if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
535                         ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
536                         ASSERT(iclog->ic_offset == 0);
537                 }
538                 iclog = iclog->ic_next;
539         } while (iclog != first_iclog);
540 #endif
541         if (! (XLOG_FORCED_SHUTDOWN(log))) {
542                 reg[0].i_addr = (void*)&magic;
543                 reg[0].i_len  = sizeof(magic);
544                 XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_UNMOUNT);
545
546                 error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0, 0);
547                 if (!error) {
548                         /* remove inited flag */
549                         ((xlog_ticket_t *)tic)->t_flags = 0;
550                         error = xlog_write(mp, reg, 1, tic, &lsn,
551                                            NULL, XLOG_UNMOUNT_TRANS);
552                         /*
553                          * At this point, we're umounting anyway,
554                          * so there's no point in transitioning log state
555                          * to IOERROR. Just continue...
556                          */
557                 }
558
559                 if (error) {
560                         xfs_fs_cmn_err(CE_ALERT, mp,
561                                 "xfs_log_unmount: unmount record failed");
562                 }
563
564
565                 s = LOG_LOCK(log);
566                 iclog = log->l_iclog;
567                 iclog->ic_refcnt++;
568                 LOG_UNLOCK(log, s);
569                 xlog_state_want_sync(log, iclog);
570                 (void) xlog_state_release_iclog(log, iclog);
571
572                 s = LOG_LOCK(log);
573                 if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
574                       iclog->ic_state == XLOG_STATE_DIRTY)) {
575                         if (!XLOG_FORCED_SHUTDOWN(log)) {
576                                 sv_wait(&iclog->ic_forcesema, PMEM,
577                                         &log->l_icloglock, s);
578                         } else {
579                                 LOG_UNLOCK(log, s);
580                         }
581                 } else {
582                         LOG_UNLOCK(log, s);
583                 }
584                 if (tic)
585                         xlog_state_put_ticket(log, tic);
586         } else {
587                 /*
588                  * We're already in forced_shutdown mode, couldn't
589                  * even attempt to write out the unmount transaction.
590                  *
591                  * Go through the motions of sync'ing and releasing
592                  * the iclog, even though no I/O will actually happen,
593                  * we need to wait for other log I/O's that may already
594                  * be in progress.  Do this as a separate section of
595                  * code so we'll know if we ever get stuck here that
596                  * we're in this odd situation of trying to unmount
597                  * a file system that went into forced_shutdown as
598                  * the result of an unmount..
599                  */
600                 s = LOG_LOCK(log);
601                 iclog = log->l_iclog;
602                 iclog->ic_refcnt++;
603                 LOG_UNLOCK(log, s);
604
605                 xlog_state_want_sync(log, iclog);
606                 (void) xlog_state_release_iclog(log, iclog);
607
608                 s = LOG_LOCK(log);
609
610                 if ( ! (   iclog->ic_state == XLOG_STATE_ACTIVE
611                         || iclog->ic_state == XLOG_STATE_DIRTY
612                         || iclog->ic_state == XLOG_STATE_IOERROR) ) {
613
614                                 sv_wait(&iclog->ic_forcesema, PMEM,
615                                         &log->l_icloglock, s);
616                 } else {
617                         LOG_UNLOCK(log, s);
618                 }
619         }
620
621         return 0;
622 }       /* xfs_log_unmount_write */
623
624 /*
625  * Deallocate log structures for unmount/relocation.
626  */
627 void
628 xfs_log_unmount_dealloc(xfs_mount_t *mp)
629 {
630         xlog_unalloc_log(mp->m_log);
631 }
632
633 /*
634  * Write region vectors to log.  The write happens using the space reservation
635  * of the ticket (tic).  It is not a requirement that all writes for a given
636  * transaction occur with one call to xfs_log_write().
637  */
638 int
639 xfs_log_write(xfs_mount_t *     mp,
640               xfs_log_iovec_t   reg[],
641               int               nentries,
642               xfs_log_ticket_t  tic,
643               xfs_lsn_t         *start_lsn)
644 {
645         int     error;
646         xlog_t *log = mp->m_log;
647
648         if (XLOG_FORCED_SHUTDOWN(log))
649                 return XFS_ERROR(EIO);
650
651         if ((error = xlog_write(mp, reg, nentries, tic, start_lsn, NULL, 0))) {
652                 xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
653         }
654         return (error);
655 }       /* xfs_log_write */
656
657
658 void
659 xfs_log_move_tail(xfs_mount_t   *mp,
660                   xfs_lsn_t     tail_lsn)
661 {
662         xlog_ticket_t   *tic;
663         xlog_t          *log = mp->m_log;
664         int             need_bytes, free_bytes, cycle, bytes;
665         SPLDECL(s);
666
667         if (XLOG_FORCED_SHUTDOWN(log))
668                 return;
669         ASSERT(!XFS_FORCED_SHUTDOWN(mp));
670
671         if (tail_lsn == 0) {
672                 /* needed since sync_lsn is 64 bits */
673                 s = LOG_LOCK(log);
674                 tail_lsn = log->l_last_sync_lsn;
675                 LOG_UNLOCK(log, s);
676         }
677
678         s = GRANT_LOCK(log);
679
680         /* Also an invalid lsn.  1 implies that we aren't passing in a valid
681          * tail_lsn.
682          */
683         if (tail_lsn != 1) {
684                 log->l_tail_lsn = tail_lsn;
685         }
686
687         if ((tic = log->l_write_headq)) {
688 #ifdef DEBUG
689                 if (log->l_flags & XLOG_ACTIVE_RECOVERY)
690                         panic("Recovery problem");
691 #endif
692                 cycle = log->l_grant_write_cycle;
693                 bytes = log->l_grant_write_bytes;
694                 free_bytes = xlog_space_left(log, cycle, bytes);
695                 do {
696                         ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
697
698                         if (free_bytes < tic->t_unit_res && tail_lsn != 1)
699                                 break;
700                         tail_lsn = 0;
701                         free_bytes -= tic->t_unit_res;
702                         sv_signal(&tic->t_sema);
703                         tic = tic->t_next;
704                 } while (tic != log->l_write_headq);
705         }
706         if ((tic = log->l_reserve_headq)) {
707 #ifdef DEBUG
708                 if (log->l_flags & XLOG_ACTIVE_RECOVERY)
709                         panic("Recovery problem");
710 #endif
711                 cycle = log->l_grant_reserve_cycle;
712                 bytes = log->l_grant_reserve_bytes;
713                 free_bytes = xlog_space_left(log, cycle, bytes);
714                 do {
715                         if (tic->t_flags & XLOG_TIC_PERM_RESERV)
716                                 need_bytes = tic->t_unit_res*tic->t_cnt;
717                         else
718                                 need_bytes = tic->t_unit_res;
719                         if (free_bytes < need_bytes && tail_lsn != 1)
720                                 break;
721                         tail_lsn = 0;
722                         free_bytes -= need_bytes;
723                         sv_signal(&tic->t_sema);
724                         tic = tic->t_next;
725                 } while (tic != log->l_reserve_headq);
726         }
727         GRANT_UNLOCK(log, s);
728 }       /* xfs_log_move_tail */
729
730 /*
731  * Determine if we have a transaction that has gone to disk
732  * that needs to be covered. Log activity needs to be idle (no AIL and
733  * nothing in the iclogs). And, we need to be in the right state indicating
734  * something has gone out.
735  */
736 int
737 xfs_log_need_covered(xfs_mount_t *mp)
738 {
739         SPLDECL(s);
740         int             needed = 0, gen;
741         xlog_t          *log = mp->m_log;
742         vfs_t           *vfsp = XFS_MTOVFS(mp);
743
744         if (fs_frozen(vfsp) || XFS_FORCED_SHUTDOWN(mp) ||
745             (vfsp->vfs_flag & VFS_RDONLY))
746                 return 0;
747
748         s = LOG_LOCK(log);
749         if (((log->l_covered_state == XLOG_STATE_COVER_NEED) ||
750                 (log->l_covered_state == XLOG_STATE_COVER_NEED2))
751                         && !xfs_trans_first_ail(mp, &gen)
752                         && xlog_iclogs_empty(log)) {
753                 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
754                         log->l_covered_state = XLOG_STATE_COVER_DONE;
755                 else {
756                         ASSERT(log->l_covered_state == XLOG_STATE_COVER_NEED2);
757                         log->l_covered_state = XLOG_STATE_COVER_DONE2;
758                 }
759                 needed = 1;
760         }
761         LOG_UNLOCK(log, s);
762         return(needed);
763 }
764
765 /******************************************************************************
766  *
767  *      local routines
768  *
769  ******************************************************************************
770  */
771
772 /* xfs_trans_tail_ail returns 0 when there is nothing in the list.
773  * The log manager must keep track of the last LR which was committed
774  * to disk.  The lsn of this LR will become the new tail_lsn whenever
775  * xfs_trans_tail_ail returns 0.  If we don't do this, we run into
776  * the situation where stuff could be written into the log but nothing
777  * was ever in the AIL when asked.  Eventually, we panic since the
778  * tail hits the head.
779  *
780  * We may be holding the log iclog lock upon entering this routine.
781  */
782 xfs_lsn_t
783 xlog_assign_tail_lsn(xfs_mount_t *mp)
784 {
785         xfs_lsn_t tail_lsn;
786         SPLDECL(s);
787         xlog_t    *log = mp->m_log;
788
789         tail_lsn = xfs_trans_tail_ail(mp);
790         s = GRANT_LOCK(log);
791         if (tail_lsn != 0) {
792                 log->l_tail_lsn = tail_lsn;
793         } else {
794                 tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
795         }
796         GRANT_UNLOCK(log, s);
797
798         return tail_lsn;
799 }       /* xlog_assign_tail_lsn */
800
801
802 /*
803  * Return the space in the log between the tail and the head.  The head
804  * is passed in the cycle/bytes formal parms.  In the special case where
805  * the reserve head has wrapped passed the tail, this calculation is no
806  * longer valid.  In this case, just return 0 which means there is no space
807  * in the log.  This works for all places where this function is called
808  * with the reserve head.  Of course, if the write head were to ever
809  * wrap the tail, we should blow up.  Rather than catch this case here,
810  * we depend on other ASSERTions in other parts of the code.   XXXmiken
811  *
812  * This code also handles the case where the reservation head is behind
813  * the tail.  The details of this case are described below, but the end
814  * result is that we return the size of the log as the amount of space left.
815  */
816 int
817 xlog_space_left(xlog_t *log, int cycle, int bytes)
818 {
819         int free_bytes;
820         int tail_bytes;
821         int tail_cycle;
822
823         tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
824         tail_cycle = CYCLE_LSN(log->l_tail_lsn);
825         if ((tail_cycle == cycle) && (bytes >= tail_bytes)) {
826                 free_bytes = log->l_logsize - (bytes - tail_bytes);
827         } else if ((tail_cycle + 1) < cycle) {
828                 return 0;
829         } else if (tail_cycle < cycle) {
830                 ASSERT(tail_cycle == (cycle - 1));
831                 free_bytes = tail_bytes - bytes;
832         } else {
833                 /*
834                  * The reservation head is behind the tail.
835                  * In this case we just want to return the size of the
836                  * log as the amount of space left.
837                  */
838                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
839                         "xlog_space_left: head behind tail\n"
840                         "  tail_cycle = %d, tail_bytes = %d\n"
841                         "  GH   cycle = %d, GH   bytes = %d",
842                         tail_cycle, tail_bytes, cycle, bytes);
843                 ASSERT(0);
844                 free_bytes = log->l_logsize;
845         }
846         return free_bytes;
847 }       /* xlog_space_left */
848
849
850 /*
851  * Log function which is called when an io completes.
852  *
853  * The log manager needs its own routine, in order to control what
854  * happens with the buffer after the write completes.
855  */
856 void
857 xlog_iodone(xfs_buf_t *bp)
858 {
859         xlog_in_core_t  *iclog;
860         xlog_t          *l;
861         int             aborted;
862
863         iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
864         ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long) 2);
865         XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
866         aborted = 0;
867
868         /*
869          * Some versions of cpp barf on the recursive definition of
870          * ic_log -> hic_fields.ic_log and expand ic_log twice when
871          * it is passed through two macros.  Workaround broken cpp.
872          */
873         l = iclog->ic_log;
874
875         /*
876          * Race to shutdown the filesystem if we see an error.
877          */
878         if (XFS_TEST_ERROR((XFS_BUF_GETERROR(bp)), l->l_mp,
879                         XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
880                 xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp));
881                 XFS_BUF_STALE(bp);
882                 xfs_force_shutdown(l->l_mp, XFS_LOG_IO_ERROR);
883                 /*
884                  * This flag will be propagated to the trans-committed
885                  * callback routines to let them know that the log-commit
886                  * didn't succeed.
887                  */
888                 aborted = XFS_LI_ABORTED;
889         } else if (iclog->ic_state & XLOG_STATE_IOERROR) {
890                 aborted = XFS_LI_ABORTED;
891         }
892         xlog_state_done_syncing(iclog, aborted);
893         if (!(XFS_BUF_ISASYNC(bp))) {
894                 /*
895                  * Corresponding psema() will be done in bwrite().  If we don't
896                  * vsema() here, panic.
897                  */
898                 XFS_BUF_V_IODONESEMA(bp);
899         }
900 }       /* xlog_iodone */
901
902 /*
903  * The bdstrat callback function for log bufs. This gives us a central
904  * place to trap bufs in case we get hit by a log I/O error and need to
905  * shutdown. Actually, in practice, even when we didn't get a log error,
906  * we transition the iclogs to IOERROR state *after* flushing all existing
907  * iclogs to disk. This is because we don't want anymore new transactions to be
908  * started or completed afterwards.
909  */
910 STATIC int
911 xlog_bdstrat_cb(struct xfs_buf *bp)
912 {
913         xlog_in_core_t *iclog;
914
915         iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
916
917         if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) {
918           /* note for irix bstrat will need  struct bdevsw passed
919            * Fix the following macro if the code ever is merged
920            */
921             XFS_bdstrat(bp);
922                 return 0;
923         }
924
925         xfs_buftrace("XLOG__BDSTRAT IOERROR", bp);
926         XFS_BUF_ERROR(bp, EIO);
927         XFS_BUF_STALE(bp);
928         xfs_biodone(bp);
929         return (XFS_ERROR(EIO));
930
931
932 }
933
934 /*
935  * Return size of each in-core log record buffer.
936  *
937  * Low memory machines only get 2 16KB buffers.  We don't want to waste
938  * memory here.  However, all other machines get at least 2 32KB buffers.
939  * The number is hard coded because we don't care about the minimum
940  * memory size, just 32MB systems.
941  *
942  * If the filesystem blocksize is too large, we may need to choose a
943  * larger size since the directory code currently logs entire blocks.
944  */
945
946 STATIC void
947 xlog_get_iclog_buffer_size(xfs_mount_t  *mp,
948                            xlog_t       *log)
949 {
950         int size;
951         int xhdrs;
952
953         if (mp->m_logbufs <= 0) {
954                 if (xfs_physmem <= btoc(128*1024*1024)) {
955                         log->l_iclog_bufs = XLOG_MIN_ICLOGS;
956                 } else if (xfs_physmem <= btoc(400*1024*1024)) {
957                         log->l_iclog_bufs = XLOG_MED_ICLOGS;
958                 } else {        /* 256K with 32K bufs */
959                         log->l_iclog_bufs = XLOG_MAX_ICLOGS;
960                 }
961         } else {
962                 log->l_iclog_bufs = mp->m_logbufs;
963         }
964
965         /*
966          * Buffer size passed in from mount system call.
967          */
968         if (mp->m_logbsize > 0) {
969                 size = log->l_iclog_size = mp->m_logbsize;
970                 log->l_iclog_size_log = 0;
971                 while (size != 1) {
972                         log->l_iclog_size_log++;
973                         size >>= 1;
974                 }
975
976                 if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
977                         /* # headers = size / 32K
978                          * one header holds cycles from 32K of data
979                          */
980
981                         xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
982                         if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
983                                 xhdrs++;
984                         log->l_iclog_hsize = xhdrs << BBSHIFT;
985                         log->l_iclog_heads = xhdrs;
986                 } else {
987                         ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
988                         log->l_iclog_hsize = BBSIZE;
989                         log->l_iclog_heads = 1;
990                 }
991                 goto done;
992         }
993
994         /*
995          * Special case machines that have less than 32MB of memory.
996          * All machines with more memory use 32KB buffers.
997          */
998         if (xfs_physmem <= btoc(32*1024*1024)) {
999                 /* Don't change; min configuration */
1000                 log->l_iclog_size = XLOG_RECORD_BSIZE;          /* 16k */
1001                 log->l_iclog_size_log = XLOG_RECORD_BSHIFT;
1002         } else {
1003                 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;      /* 32k */
1004                 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
1005         }
1006
1007         /* the default log size is 16k or 32k which is one header sector */
1008         log->l_iclog_hsize = BBSIZE;
1009         log->l_iclog_heads = 1;
1010
1011         /*
1012          * For 16KB, we use 3 32KB buffers.  For 32KB block sizes, we use
1013          * 4 32KB buffers.  For 64KB block sizes, we use 8 32KB buffers.
1014          */
1015         if (mp->m_sb.sb_blocksize >= 16*1024) {
1016                 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
1017                 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
1018                 if (mp->m_logbufs <= 0) {
1019                         switch (mp->m_sb.sb_blocksize) {
1020                             case 16*1024:                       /* 16 KB */
1021                                 log->l_iclog_bufs = 3;
1022                                 break;
1023                             case 32*1024:                       /* 32 KB */
1024                                 log->l_iclog_bufs = 4;
1025                                 break;
1026                             case 64*1024:                       /* 64 KB */
1027                                 log->l_iclog_bufs = 8;
1028                                 break;
1029                             default:
1030                                 xlog_panic("XFS: Invalid blocksize");
1031                                 break;
1032                         }
1033                 }
1034         }
1035
1036 done:   /* are we being asked to make the sizes selected above visible? */
1037         if (mp->m_logbufs == 0)
1038                 mp->m_logbufs = log->l_iclog_bufs;
1039         if (mp->m_logbsize == 0)
1040                 mp->m_logbsize = log->l_iclog_size;
1041 }       /* xlog_get_iclog_buffer_size */
1042
1043
1044 /*
1045  * This routine initializes some of the log structure for a given mount point.
1046  * Its primary purpose is to fill in enough, so recovery can occur.  However,
1047  * some other stuff may be filled in too.
1048  */
1049 STATIC xlog_t *
1050 xlog_alloc_log(xfs_mount_t      *mp,
1051                xfs_buftarg_t    *log_target,
1052                xfs_daddr_t      blk_offset,
1053                int              num_bblks)
1054 {
1055         xlog_t                  *log;
1056         xlog_rec_header_t       *head;
1057         xlog_in_core_t          **iclogp;
1058         xlog_in_core_t          *iclog, *prev_iclog=NULL;
1059         xfs_buf_t               *bp;
1060         int                     i;
1061         int                     iclogsize;
1062
1063         log = (xlog_t *)kmem_zalloc(sizeof(xlog_t), KM_SLEEP);
1064
1065         log->l_mp          = mp;
1066         log->l_targ        = log_target;
1067         log->l_logsize     = BBTOB(num_bblks);
1068         log->l_logBBstart  = blk_offset;
1069         log->l_logBBsize   = num_bblks;
1070         log->l_covered_state = XLOG_STATE_COVER_IDLE;
1071         log->l_flags       |= XLOG_ACTIVE_RECOVERY;
1072
1073         log->l_prev_block  = -1;
1074         ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, 1, 0);
1075         /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1076         log->l_last_sync_lsn = log->l_tail_lsn;
1077         log->l_curr_cycle  = 1;     /* 0 is bad since this is initial value */
1078         log->l_grant_reserve_cycle = 1;
1079         log->l_grant_write_cycle = 1;
1080
1081         if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb)) {
1082                 log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
1083                 ASSERT(log->l_sectbb_log <= mp->m_sectbb_log);
1084                 /* for larger sector sizes, must have v2 or external log */
1085                 ASSERT(log->l_sectbb_log == 0 ||
1086                         log->l_logBBstart == 0 ||
1087                         XFS_SB_VERSION_HASLOGV2(&mp->m_sb));
1088                 ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT);
1089         }
1090         log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;
1091
1092         xlog_get_iclog_buffer_size(mp, log);
1093
1094         bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
1095         XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
1096         XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
1097         XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
1098         ASSERT(XFS_BUF_ISBUSY(bp));
1099         ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
1100         log->l_xbuf = bp;
1101
1102         spinlock_init(&log->l_icloglock, "iclog");
1103         spinlock_init(&log->l_grant_lock, "grhead_iclog");
1104         initnsema(&log->l_flushsema, 0, "ic-flush");
1105         xlog_state_ticket_alloc(log);  /* wait until after icloglock inited */
1106
1107         /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
1108         ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
1109
1110         iclogp = &log->l_iclog;
1111         /*
1112          * The amount of memory to allocate for the iclog structure is
1113          * rather funky due to the way the structure is defined.  It is
1114          * done this way so that we can use different sizes for machines
1115          * with different amounts of memory.  See the definition of
1116          * xlog_in_core_t in xfs_log_priv.h for details.
1117          */
1118         iclogsize = log->l_iclog_size;
1119         ASSERT(log->l_iclog_size >= 4096);
1120         for (i=0; i < log->l_iclog_bufs; i++) {
1121                 *iclogp = (xlog_in_core_t *)
1122                           kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP);
1123                 iclog = *iclogp;
1124                 iclog->hic_data = (xlog_in_core_2_t *)
1125                           kmem_zalloc(iclogsize, KM_SLEEP);
1126
1127                 iclog->ic_prev = prev_iclog;
1128                 prev_iclog = iclog;
1129                 log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
1130
1131                 head = &iclog->ic_header;
1132                 memset(head, 0, sizeof(xlog_rec_header_t));
1133                 INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
1134                 INT_SET(head->h_version, ARCH_CONVERT,
1135                         XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
1136                 INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size);
1137                 /* new fields */
1138                 INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT);
1139                 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));
1140
1141                 bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
1142                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
1143                 XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
1144                 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
1145                 iclog->ic_bp = bp;
1146
1147                 iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
1148                 iclog->ic_state = XLOG_STATE_ACTIVE;
1149                 iclog->ic_log = log;
1150                 iclog->ic_callback_tail = &(iclog->ic_callback);
1151                 iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize;
1152
1153                 ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
1154                 ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
1155                 sv_init(&iclog->ic_forcesema, SV_DEFAULT, "iclog-force");
1156                 sv_init(&iclog->ic_writesema, SV_DEFAULT, "iclog-write");
1157
1158                 iclogp = &iclog->ic_next;
1159         }
1160         *iclogp = log->l_iclog;                 /* complete ring */
1161         log->l_iclog->ic_prev = prev_iclog;     /* re-write 1st prev ptr */
1162
1163         return log;
1164 }       /* xlog_alloc_log */
1165
1166
1167 /*
1168  * Write out the commit record of a transaction associated with the given
1169  * ticket.  Return the lsn of the commit record.
1170  */
1171 STATIC int
1172 xlog_commit_record(xfs_mount_t  *mp,
1173                    xlog_ticket_t *ticket,
1174                    xlog_in_core_t **iclog,
1175                    xfs_lsn_t    *commitlsnp)
1176 {
1177         int             error;
1178         xfs_log_iovec_t reg[1];
1179
1180         reg[0].i_addr = NULL;
1181         reg[0].i_len = 0;
1182         XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_COMMIT);
1183
1184         ASSERT_ALWAYS(iclog);
1185         if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
1186                                iclog, XLOG_COMMIT_TRANS))) {
1187                 xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
1188         }
1189         return (error);
1190 }       /* xlog_commit_record */
1191
1192
1193 /*
1194  * Push on the buffer cache code if we ever use more than 75% of the on-disk
1195  * log space.  This code pushes on the lsn which would supposedly free up
1196  * the 25% which we want to leave free.  We may need to adopt a policy which
1197  * pushes on an lsn which is further along in the log once we reach the high
1198  * water mark.  In this manner, we would be creating a low water mark.
1199  */
1200 void
1201 xlog_grant_push_ail(xfs_mount_t *mp,
1202                     int         need_bytes)
1203 {
1204     xlog_t      *log = mp->m_log;       /* pointer to the log */
1205     xfs_lsn_t   tail_lsn;               /* lsn of the log tail */
1206     xfs_lsn_t   threshold_lsn = 0;      /* lsn we'd like to be at */
1207     int         free_blocks;            /* free blocks left to write to */
1208     int         free_bytes;             /* free bytes left to write to */
1209     int         threshold_block;        /* block in lsn we'd like to be at */
1210     int         threshold_cycle;        /* lsn cycle we'd like to be at */
1211     int         free_threshold;
1212     SPLDECL(s);
1213
1214     ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1215
1216     s = GRANT_LOCK(log);
1217     free_bytes = xlog_space_left(log,
1218                                  log->l_grant_reserve_cycle,
1219                                  log->l_grant_reserve_bytes);
1220     tail_lsn = log->l_tail_lsn;
1221     free_blocks = BTOBBT(free_bytes);
1222
1223     /*
1224      * Set the threshold for the minimum number of free blocks in the
1225      * log to the maximum of what the caller needs, one quarter of the
1226      * log, and 256 blocks.
1227      */
1228     free_threshold = BTOBB(need_bytes);
1229     free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
1230     free_threshold = MAX(free_threshold, 256);
1231     if (free_blocks < free_threshold) {
1232         threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
1233         threshold_cycle = CYCLE_LSN(tail_lsn);
1234         if (threshold_block >= log->l_logBBsize) {
1235             threshold_block -= log->l_logBBsize;
1236             threshold_cycle += 1;
1237         }
1238         ASSIGN_ANY_LSN_HOST(threshold_lsn, threshold_cycle,
1239                        threshold_block);
1240
1241         /* Don't pass in an lsn greater than the lsn of the last
1242          * log record known to be on disk.
1243          */
1244         if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
1245             threshold_lsn = log->l_last_sync_lsn;
1246     }
1247     GRANT_UNLOCK(log, s);
1248
1249     /*
1250      * Get the transaction layer to kick the dirty buffers out to
1251      * disk asynchronously. No point in trying to do this if
1252      * the filesystem is shutting down.
1253      */
1254     if (threshold_lsn &&
1255         !XLOG_FORCED_SHUTDOWN(log))
1256             xfs_trans_push_ail(mp, threshold_lsn);
1257 }       /* xlog_grant_push_ail */
1258
1259
1260 /*
1261  * Flush out the in-core log (iclog) to the on-disk log in an asynchronous 
1262  * fashion.  Previously, we should have moved the current iclog
1263  * ptr in the log to point to the next available iclog.  This allows further
1264  * write to continue while this code syncs out an iclog ready to go.
1265  * Before an in-core log can be written out, the data section must be scanned
1266  * to save away the 1st word of each BBSIZE block into the header.  We replace
1267  * it with the current cycle count.  Each BBSIZE block is tagged with the
1268  * cycle count because there in an implicit assumption that drives will
1269  * guarantee that entire 512 byte blocks get written at once.  In other words,
1270  * we can't have part of a 512 byte block written and part not written.  By
1271  * tagging each block, we will know which blocks are valid when recovering
1272  * after an unclean shutdown.
1273  *
1274  * This routine is single threaded on the iclog.  No other thread can be in
1275  * this routine with the same iclog.  Changing contents of iclog can there-
1276  * fore be done without grabbing the state machine lock.  Updating the global
1277  * log will require grabbing the lock though.
1278  *
1279  * The entire log manager uses a logical block numbering scheme.  Only
1280  * log_sync (and then only bwrite()) know about the fact that the log may
1281  * not start with block zero on a given device.  The log block start offset
1282  * is added immediately before calling bwrite().
1283  */
1284
1285 int
1286 xlog_sync(xlog_t                *log,
1287           xlog_in_core_t        *iclog)
1288 {
1289         xfs_caddr_t     dptr;           /* pointer to byte sized element */
1290         xfs_buf_t       *bp;
1291         int             i, ops;
1292         uint            count;          /* byte count of bwrite */
1293         uint            count_init;     /* initial count before roundup */
1294         int             roundoff;       /* roundoff to BB or stripe */
1295         int             split = 0;      /* split write into two regions */
1296         int             error;
1297         SPLDECL(s);
1298         int             v2 = XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb);
1299
1300         XFS_STATS_INC(xs_log_writes);
1301         ASSERT(iclog->ic_refcnt == 0);
1302
1303         /* Add for LR header */
1304         count_init = log->l_iclog_hsize + iclog->ic_offset;
1305
1306         /* Round out the log write size */
1307         if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
1308                 /* we have a v2 stripe unit to use */
1309                 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
1310         } else {
1311                 count = BBTOB(BTOBB(count_init));
1312         }
1313         roundoff = count - count_init;
1314         ASSERT(roundoff >= 0);
1315         ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && 
1316                 roundoff < log->l_mp->m_sb.sb_logsunit)
1317                 || 
1318                 (log->l_mp->m_sb.sb_logsunit <= 1 && 
1319                  roundoff < BBTOB(1)));
1320
1321         /* move grant heads by roundoff in sync */
1322         s = GRANT_LOCK(log);
1323         XLOG_GRANT_ADD_SPACE(log, roundoff, 'w');
1324         XLOG_GRANT_ADD_SPACE(log, roundoff, 'r');
1325         GRANT_UNLOCK(log, s);
1326
1327         /* put cycle number in every block */
1328         xlog_pack_data(log, iclog, roundoff); 
1329
1330         /* real byte length */
1331         if (v2) {
1332                 INT_SET(iclog->ic_header.h_len, 
1333                         ARCH_CONVERT,
1334                         iclog->ic_offset + roundoff);
1335         } else {
1336                 INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
1337         }
1338
1339         /* put ops count in correct order */
1340         ops = iclog->ic_header.h_num_logops;
1341         INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
1342
1343         bp          = iclog->ic_bp;
1344         ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
1345         XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
1346         XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)));
1347
1348         XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
1349
1350         /* Do we need to split this write into 2 parts? */
1351         if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
1352                 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
1353                 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
1354                 iclog->ic_bwritecnt = 2;        /* split into 2 writes */
1355         } else {
1356                 iclog->ic_bwritecnt = 1;
1357         }
1358         XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count);
1359         XFS_BUF_SET_FSPRIVATE(bp, iclog);       /* save for later */
1360         XFS_BUF_BUSY(bp);
1361         XFS_BUF_ASYNC(bp);
1362         /*
1363          * Do an ordered write for the log block.
1364          *
1365          * It may not be needed to flush the first split block in the log wrap
1366          * case, but do it anyways to be safe -AK
1367          */
1368         if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1369                 XFS_BUF_ORDERED(bp);
1370
1371         ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1372         ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1373
1374         xlog_verify_iclog(log, iclog, count, B_TRUE);
1375
1376         /* account for log which doesn't start at block #0 */
1377         XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1378         /*
1379          * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
1380          * is shutting down.
1381          */
1382         XFS_BUF_WRITE(bp);
1383
1384         if ((error = XFS_bwrite(bp))) {
1385                 xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
1386                                   XFS_BUF_ADDR(bp));
1387                 return (error);
1388         }
1389         if (split) {
1390                 bp              = iclog->ic_log->l_xbuf;
1391                 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) ==
1392                                                         (unsigned long)1);
1393                 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
1394                 XFS_BUF_SET_ADDR(bp, 0);             /* logical 0 */
1395                 XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+
1396                                             (__psint_t)count), split);
1397                 XFS_BUF_SET_FSPRIVATE(bp, iclog);
1398                 XFS_BUF_BUSY(bp);
1399                 XFS_BUF_ASYNC(bp);
1400                 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1401                         XFS_BUF_ORDERED(bp);
1402                 dptr = XFS_BUF_PTR(bp);
1403                 /*
1404                  * Bump the cycle numbers at the start of each block
1405                  * since this part of the buffer is at the start of
1406                  * a new cycle.  Watch out for the header magic number
1407                  * case, though.
1408                  */
1409                 for (i=0; i<split; i += BBSIZE) {
1410                         INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
1411                         if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
1412                                 INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1);
1413                         dptr += BBSIZE;
1414                 }
1415
1416                 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1417                 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1418
1419                 /* account for internal log which does't start at block #0 */
1420                 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1421                 XFS_BUF_WRITE(bp);
1422                 if ((error = XFS_bwrite(bp))) {
1423                         xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
1424                                           bp, XFS_BUF_ADDR(bp));
1425                         return (error);
1426                 }
1427         }
1428         return (0);
1429 }       /* xlog_sync */
1430
1431
1432 /*
1433  * Unallocate a log structure
1434  */
1435 void
1436 xlog_unalloc_log(xlog_t *log)
1437 {
1438         xlog_in_core_t  *iclog, *next_iclog;
1439         xlog_ticket_t   *tic, *next_tic;
1440         int             i;
1441
1442
1443         iclog = log->l_iclog;
1444         for (i=0; i<log->l_iclog_bufs; i++) {
1445                 sv_destroy(&iclog->ic_forcesema);
1446                 sv_destroy(&iclog->ic_writesema);
1447                 xfs_buf_free(iclog->ic_bp);
1448 #ifdef XFS_LOG_TRACE
1449                 if (iclog->ic_trace != NULL) {
1450                         ktrace_free(iclog->ic_trace);
1451                 }
1452 #endif
1453                 next_iclog = iclog->ic_next;
1454                 kmem_free(iclog->hic_data, log->l_iclog_size);
1455                 kmem_free(iclog, sizeof(xlog_in_core_t));
1456                 iclog = next_iclog;
1457         }
1458         freesema(&log->l_flushsema);
1459         spinlock_destroy(&log->l_icloglock);
1460         spinlock_destroy(&log->l_grant_lock);
1461
1462         /* XXXsup take a look at this again. */
1463         if ((log->l_ticket_cnt != log->l_ticket_tcnt)  &&
1464             !XLOG_FORCED_SHUTDOWN(log)) {
1465                 xfs_fs_cmn_err(CE_WARN, log->l_mp,
1466                         "xlog_unalloc_log: (cnt: %d, total: %d)",
1467                         log->l_ticket_cnt, log->l_ticket_tcnt);
1468                 /* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */
1469
1470         } else {
1471                 tic = log->l_unmount_free;
1472                 while (tic) {
1473                         next_tic = tic->t_next;
1474                         kmem_free(tic, NBPP);
1475                         tic = next_tic;
1476                 }
1477         }
1478         xfs_buf_free(log->l_xbuf);
1479 #ifdef XFS_LOG_TRACE
1480         if (log->l_trace != NULL) {
1481                 ktrace_free(log->l_trace);
1482         }
1483         if (log->l_grant_trace != NULL) {
1484                 ktrace_free(log->l_grant_trace);
1485         }
1486 #endif
1487         log->l_mp->m_log = NULL;
1488         kmem_free(log, sizeof(xlog_t));
1489 }       /* xlog_unalloc_log */
1490
1491 /*
1492  * Update counters atomically now that memcpy is done.
1493  */
1494 /* ARGSUSED */
1495 static inline void
1496 xlog_state_finish_copy(xlog_t           *log,
1497                        xlog_in_core_t   *iclog,
1498                        int              record_cnt,
1499                        int              copy_bytes)
1500 {
1501         SPLDECL(s);
1502
1503         s = LOG_LOCK(log);
1504
1505         iclog->ic_header.h_num_logops += record_cnt;
1506         iclog->ic_offset += copy_bytes;
1507
1508         LOG_UNLOCK(log, s);
1509 }       /* xlog_state_finish_copy */
1510
1511
1512
1513
1514 /*
1515  * print out info relating to regions written which consume
1516  * the reservation
1517  */
1518 #if defined(XFS_LOG_RES_DEBUG)
1519 STATIC void
1520 xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
1521 {
1522         uint i;
1523         uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
1524
1525         /* match with XLOG_REG_TYPE_* in xfs_log.h */
1526         static char *res_type_str[XLOG_REG_TYPE_MAX] = {
1527             "bformat",
1528             "bchunk",
1529             "efi_format",
1530             "efd_format",
1531             "iformat",
1532             "icore",
1533             "iext",
1534             "ibroot",
1535             "ilocal",
1536             "iattr_ext",
1537             "iattr_broot",
1538             "iattr_local",
1539             "qformat",
1540             "dquot",
1541             "quotaoff",
1542             "LR header",
1543             "unmount",
1544             "commit",
1545             "trans header"
1546         };
1547         static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
1548             "SETATTR_NOT_SIZE",
1549             "SETATTR_SIZE",
1550             "INACTIVE",
1551             "CREATE",
1552             "CREATE_TRUNC",
1553             "TRUNCATE_FILE",
1554             "REMOVE",
1555             "LINK",
1556             "RENAME",
1557             "MKDIR",
1558             "RMDIR",
1559             "SYMLINK",
1560             "SET_DMATTRS",
1561             "GROWFS",
1562             "STRAT_WRITE",
1563             "DIOSTRAT",
1564             "WRITE_SYNC",
1565             "WRITEID",
1566             "ADDAFORK",
1567             "ATTRINVAL",
1568             "ATRUNCATE",
1569             "ATTR_SET",
1570             "ATTR_RM",
1571             "ATTR_FLAG",
1572             "CLEAR_AGI_BUCKET",
1573             "QM_SBCHANGE",
1574             "DUMMY1",
1575             "DUMMY2",
1576             "QM_QUOTAOFF",
1577             "QM_DQALLOC",
1578             "QM_SETQLIM",
1579             "QM_DQCLUSTER",
1580             "QM_QINOCREATE",
1581             "QM_QUOTAOFF_END",
1582             "SB_UNIT",
1583             "FSYNC_TS",
1584             "GROWFSRT_ALLOC",
1585             "GROWFSRT_ZERO",
1586             "GROWFSRT_FREE",
1587             "SWAPEXT"
1588         };
1589
1590         xfs_fs_cmn_err(CE_WARN, mp,
1591                         "xfs_log_write: reservation summary:\n"
1592                         "  trans type  = %s (%u)\n"
1593                         "  unit res    = %d bytes\n"
1594                         "  current res = %d bytes\n"
1595                         "  total reg   = %u bytes (o/flow = %u bytes)\n"
1596                         "  ophdrs      = %u (ophdr space = %u bytes)\n"
1597                         "  ophdr + reg = %u bytes\n"
1598                         "  num regions = %u\n",
1599                         ((ticket->t_trans_type <= 0 ||
1600                           ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
1601                           "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
1602                         ticket->t_trans_type,
1603                         ticket->t_unit_res,
1604                         ticket->t_curr_res,
1605                         ticket->t_res_arr_sum, ticket->t_res_o_flow,
1606                         ticket->t_res_num_ophdrs, ophdr_spc,
1607                         ticket->t_res_arr_sum + 
1608                           ticket->t_res_o_flow + ophdr_spc,
1609                         ticket->t_res_num);
1610
1611         for (i = 0; i < ticket->t_res_num; i++) {
1612                 uint r_type = ticket->t_res_arr[i].r_type; 
1613                 cmn_err(CE_WARN,
1614                             "region[%u]: %s - %u bytes\n",
1615                             i, 
1616                             ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
1617                             "bad-rtype" : res_type_str[r_type-1]),
1618                             ticket->t_res_arr[i].r_len);
1619         }
1620 }
1621 #else
1622 #define xlog_print_tic_res(mp, ticket)
1623 #endif
1624
1625 /*
1626  * Write some region out to in-core log
1627  *
1628  * This will be called when writing externally provided regions or when
1629  * writing out a commit record for a given transaction.
1630  *
1631  * General algorithm:
1632  *      1. Find total length of this write.  This may include adding to the
1633  *              lengths passed in.
1634  *      2. Check whether we violate the tickets reservation.
1635  *      3. While writing to this iclog
1636  *          A. Reserve as much space in this iclog as can get
1637  *          B. If this is first write, save away start lsn
1638  *          C. While writing this region:
1639  *              1. If first write of transaction, write start record
1640  *              2. Write log operation header (header per region)
1641  *              3. Find out if we can fit entire region into this iclog
1642  *              4. Potentially, verify destination memcpy ptr
1643  *              5. Memcpy (partial) region
1644  *              6. If partial copy, release iclog; otherwise, continue
1645  *                      copying more regions into current iclog
1646  *      4. Mark want sync bit (in simulation mode)
1647  *      5. Release iclog for potential flush to on-disk log.
1648  *
1649  * ERRORS:
1650  * 1.   Panic if reservation is overrun.  This should never happen since
1651  *      reservation amounts are generated internal to the filesystem.
1652  * NOTES:
1653  * 1. Tickets are single threaded data structures.
1654  * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the
1655  *      syncing routine.  When a single log_write region needs to span
1656  *      multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set
1657  *      on all log operation writes which don't contain the end of the
1658  *      region.  The XLOG_END_TRANS bit is used for the in-core log
1659  *      operation which contains the end of the continued log_write region.
1660  * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
1661  *      we don't really know exactly how much space will be used.  As a result,
1662  *      we don't update ic_offset until the end when we know exactly how many
1663  *      bytes have been written out.
1664  */
1665 int
1666 xlog_write(xfs_mount_t *        mp,
1667            xfs_log_iovec_t      reg[],
1668            int                  nentries,
1669            xfs_log_ticket_t     tic,
1670            xfs_lsn_t            *start_lsn,
1671            xlog_in_core_t       **commit_iclog,
1672            uint                 flags)
1673 {
1674     xlog_t           *log    = mp->m_log;
1675     xlog_ticket_t    *ticket = (xlog_ticket_t *)tic;
1676     xlog_op_header_t *logop_head;    /* ptr to log operation header */
1677     xlog_in_core_t   *iclog;         /* ptr to current in-core log */
1678     __psint_t        ptr;            /* copy address into data region */
1679     int              len;            /* # xlog_write() bytes 2 still copy */
1680     int              index;          /* region index currently copying */
1681     int              log_offset;     /* offset (from 0) into data region */
1682     int              start_rec_copy; /* # bytes to copy for start record */
1683     int              partial_copy;   /* did we split a region? */
1684     int              partial_copy_len;/* # bytes copied if split region */
1685     int              need_copy;      /* # bytes need to memcpy this region */
1686     int              copy_len;       /* # bytes actually memcpy'ing */
1687     int              copy_off;       /* # bytes from entry start */
1688     int              contwr;         /* continued write of in-core log? */
1689     int              error;
1690     int              record_cnt = 0, data_cnt = 0;
1691
1692     partial_copy_len = partial_copy = 0;
1693
1694     /* Calculate potential maximum space.  Each region gets its own
1695      * xlog_op_header_t and may need to be double word aligned.
1696      */
1697     len = 0;
1698     if (ticket->t_flags & XLOG_TIC_INITED) {    /* acct for start rec of xact */
1699         len += sizeof(xlog_op_header_t);
1700         XLOG_TIC_ADD_OPHDR(ticket);
1701     }
1702
1703     for (index = 0; index < nentries; index++) {
1704         len += sizeof(xlog_op_header_t);            /* each region gets >= 1 */
1705         XLOG_TIC_ADD_OPHDR(ticket);
1706         len += reg[index].i_len;
1707         XLOG_TIC_ADD_REGION(ticket, reg[index].i_len, reg[index].i_type);
1708     }
1709     contwr = *start_lsn = 0;
1710
1711     if (ticket->t_curr_res < len) {
1712         xlog_print_tic_res(mp, ticket);
1713 #ifdef DEBUG
1714         xlog_panic(
1715                 "xfs_log_write: reservation ran out. Need to up reservation");
1716 #else
1717         /* Customer configurable panic */
1718         xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp,
1719                 "xfs_log_write: reservation ran out. Need to up reservation");
1720         /* If we did not panic, shutdown the filesystem */
1721         xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
1722 #endif
1723     } else
1724         ticket->t_curr_res -= len;
1725
1726     for (index = 0; index < nentries; ) {
1727         if ((error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
1728                                                &contwr, &log_offset)))
1729                 return (error);
1730
1731         ASSERT(log_offset <= iclog->ic_size - 1);
1732         ptr = (__psint_t) ((char *)iclog->ic_datap+log_offset);
1733
1734         /* start_lsn is the first lsn written to. That's all we need. */
1735         if (! *start_lsn)
1736             *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
1737
1738         /* This loop writes out as many regions as can fit in the amount
1739          * of space which was allocated by xlog_state_get_iclog_space().
1740          */
1741         while (index < nentries) {
1742             ASSERT(reg[index].i_len % sizeof(__int32_t) == 0);
1743             ASSERT((__psint_t)ptr % sizeof(__int32_t) == 0);
1744             start_rec_copy = 0;
1745
1746             /* If first write for transaction, insert start record.
1747              * We can't be trying to commit if we are inited.  We can't
1748              * have any "partial_copy" if we are inited.
1749              */
1750             if (ticket->t_flags & XLOG_TIC_INITED) {
1751                 logop_head              = (xlog_op_header_t *)ptr;
1752                 INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid);
1753                 logop_head->oh_clientid = ticket->t_clientid;
1754                 logop_head->oh_len      = 0;
1755                 logop_head->oh_flags    = XLOG_START_TRANS;
1756                 logop_head->oh_res2     = 0;
1757                 ticket->t_flags         &= ~XLOG_TIC_INITED;    /* clear bit */
1758                 record_cnt++;
1759
1760                 start_rec_copy = sizeof(xlog_op_header_t);
1761                 xlog_write_adv_cnt(ptr, len, log_offset, start_rec_copy);
1762             }
1763
1764             /* Copy log operation header directly into data section */
1765             logop_head                  = (xlog_op_header_t *)ptr;
1766             INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid);
1767             logop_head->oh_clientid     = ticket->t_clientid;
1768             logop_head->oh_res2         = 0;
1769
1770             /* header copied directly */
1771             xlog_write_adv_cnt(ptr, len, log_offset, sizeof(xlog_op_header_t));
1772
1773             /* are we copying a commit or unmount record? */
1774             logop_head->oh_flags = flags;
1775
1776             /*
1777              * We've seen logs corrupted with bad transaction client
1778              * ids.  This makes sure that XFS doesn't generate them on.
1779              * Turn this into an EIO and shut down the filesystem.
1780              */
1781             switch (logop_head->oh_clientid)  {
1782             case XFS_TRANSACTION:
1783             case XFS_VOLUME:
1784             case XFS_LOG:
1785                 break;
1786             default:
1787                 xfs_fs_cmn_err(CE_WARN, mp,
1788                     "Bad XFS transaction clientid 0x%x in ticket 0x%p",
1789                     logop_head->oh_clientid, tic);
1790                 return XFS_ERROR(EIO);
1791             }
1792
1793             /* Partial write last time? => (partial_copy != 0)
1794              * need_copy is the amount we'd like to copy if everything could
1795              * fit in the current memcpy.
1796              */
1797             need_copy = reg[index].i_len - partial_copy_len;
1798
1799             copy_off = partial_copy_len;
1800             if (need_copy <= iclog->ic_size - log_offset) { /*complete write */
1801                 INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len = need_copy);
1802                 if (partial_copy)
1803                     logop_head->oh_flags|= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
1804                 partial_copy_len = partial_copy = 0;
1805             } else {                                        /* partial write */
1806                 copy_len = iclog->ic_size - log_offset;
1807                 INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len);
1808                 logop_head->oh_flags |= XLOG_CONTINUE_TRANS;
1809                 if (partial_copy)
1810                         logop_head->oh_flags |= XLOG_WAS_CONT_TRANS;
1811                 partial_copy_len += copy_len;
1812                 partial_copy++;
1813                 len += sizeof(xlog_op_header_t); /* from splitting of region */
1814                 /* account for new log op header */
1815                 ticket->t_curr_res -= sizeof(xlog_op_header_t);
1816                 XLOG_TIC_ADD_OPHDR(ticket);
1817             }
1818             xlog_verify_dest_ptr(log, ptr);
1819
1820             /* copy region */
1821             ASSERT(copy_len >= 0);
1822             memcpy((xfs_caddr_t)ptr, reg[index].i_addr + copy_off, copy_len);
1823             xlog_write_adv_cnt(ptr, len, log_offset, copy_len);
1824
1825             /* make copy_len total bytes copied, including headers */
1826             copy_len += start_rec_copy + sizeof(xlog_op_header_t);
1827             record_cnt++;
1828             data_cnt += contwr ? copy_len : 0;
1829             if (partial_copy) {                 /* copied partial region */
1830                     /* already marked WANT_SYNC by xlog_state_get_iclog_space */
1831                     xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
1832                     record_cnt = data_cnt = 0;
1833                     if ((error = xlog_state_release_iclog(log, iclog)))
1834                             return (error);
1835                     break;                      /* don't increment index */
1836             } else {                            /* copied entire region */
1837                 index++;
1838                 partial_copy_len = partial_copy = 0;
1839
1840                 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
1841                     xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
1842                     record_cnt = data_cnt = 0;
1843                     xlog_state_want_sync(log, iclog);
1844                     if (commit_iclog) {
1845                         ASSERT(flags & XLOG_COMMIT_TRANS);
1846                         *commit_iclog = iclog;
1847                     } else if ((error = xlog_state_release_iclog(log, iclog)))
1848                            return (error);
1849                     if (index == nentries)
1850                             return 0;           /* we are done */
1851                     else
1852                             break;
1853                 }
1854             } /* if (partial_copy) */
1855         } /* while (index < nentries) */
1856     } /* for (index = 0; index < nentries; ) */
1857     ASSERT(len == 0);
1858
1859     xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
1860     if (commit_iclog) {
1861         ASSERT(flags & XLOG_COMMIT_TRANS);
1862         *commit_iclog = iclog;
1863         return 0;
1864     }
1865     return (xlog_state_release_iclog(log, iclog));
1866 }       /* xlog_write */
1867
1868
1869 /*****************************************************************************
1870  *
1871  *              State Machine functions
1872  *
1873  *****************************************************************************
1874  */
1875
1876 /* Clean iclogs starting from the head.  This ordering must be
1877  * maintained, so an iclog doesn't become ACTIVE beyond one that
1878  * is SYNCING.  This is also required to maintain the notion that we use
1879  * a counting semaphore to hold off would be writers to the log when every
1880  * iclog is trying to sync to disk.
1881  *
1882  * State Change: DIRTY -> ACTIVE
1883  */
1884 STATIC void
1885 xlog_state_clean_log(xlog_t *log)
1886 {
1887         xlog_in_core_t  *iclog;
1888         int changed = 0;
1889
1890         iclog = log->l_iclog;
1891         do {
1892                 if (iclog->ic_state == XLOG_STATE_DIRTY) {
1893                         iclog->ic_state = XLOG_STATE_ACTIVE;
1894                         iclog->ic_offset       = 0;
1895                         iclog->ic_callback      = NULL;   /* don't need to free */
1896                         /*
1897                          * If the number of ops in this iclog indicate it just
1898                          * contains the dummy transaction, we can
1899                          * change state into IDLE (the second time around).
1900                          * Otherwise we should change the state into
1901                          * NEED a dummy.
1902                          * We don't need to cover the dummy.
1903                          */
1904                         if (!changed &&
1905                            (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) {
1906                                 changed = 1;
1907                         } else {
1908                                 /*
1909                                  * We have two dirty iclogs so start over
1910                                  * This could also be num of ops indicates
1911                                  * this is not the dummy going out.
1912                                  */
1913                                 changed = 2;
1914                         }
1915                         iclog->ic_header.h_num_logops = 0;
1916                         memset(iclog->ic_header.h_cycle_data, 0,
1917                               sizeof(iclog->ic_header.h_cycle_data));
1918                         iclog->ic_header.h_lsn = 0;
1919                 } else if (iclog->ic_state == XLOG_STATE_ACTIVE)
1920                         /* do nothing */;
1921                 else
1922                         break;  /* stop cleaning */
1923                 iclog = iclog->ic_next;
1924         } while (iclog != log->l_iclog);
1925
1926         /* log is locked when we are called */
1927         /*
1928          * Change state for the dummy log recording.
1929          * We usually go to NEED. But we go to NEED2 if the changed indicates
1930          * we are done writing the dummy record.
1931          * If we are done with the second dummy recored (DONE2), then
1932          * we go to IDLE.
1933          */
1934         if (changed) {
1935                 switch (log->l_covered_state) {
1936                 case XLOG_STATE_COVER_IDLE:
1937                 case XLOG_STATE_COVER_NEED:
1938                 case XLOG_STATE_COVER_NEED2:
1939                         log->l_covered_state = XLOG_STATE_COVER_NEED;
1940                         break;
1941
1942                 case XLOG_STATE_COVER_DONE:
1943                         if (changed == 1)
1944                                 log->l_covered_state = XLOG_STATE_COVER_NEED2;
1945                         else
1946                                 log->l_covered_state = XLOG_STATE_COVER_NEED;
1947                         break;
1948
1949                 case XLOG_STATE_COVER_DONE2:
1950                         if (changed == 1)
1951                                 log->l_covered_state = XLOG_STATE_COVER_IDLE;
1952                         else
1953                                 log->l_covered_state = XLOG_STATE_COVER_NEED;
1954                         break;
1955
1956                 default:
1957                         ASSERT(0);
1958                 }
1959         }
1960 }       /* xlog_state_clean_log */
1961
1962 STATIC xfs_lsn_t
1963 xlog_get_lowest_lsn(
1964         xlog_t          *log)
1965 {
1966         xlog_in_core_t  *lsn_log;
1967         xfs_lsn_t       lowest_lsn, lsn;
1968
1969         lsn_log = log->l_iclog;
1970         lowest_lsn = 0;
1971         do {
1972             if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) {
1973                 lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT);
1974                 if ((lsn && !lowest_lsn) ||
1975                     (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) {
1976                         lowest_lsn = lsn;
1977                 }
1978             }
1979             lsn_log = lsn_log->ic_next;
1980         } while (lsn_log != log->l_iclog);
1981         return(lowest_lsn);
1982 }
1983
1984
1985 STATIC void
1986 xlog_state_do_callback(
1987         xlog_t          *log,
1988         int             aborted,
1989         xlog_in_core_t  *ciclog)
1990 {
1991         xlog_in_core_t     *iclog;
1992         xlog_in_core_t     *first_iclog;        /* used to know when we've
1993                                                  * processed all iclogs once */
1994         xfs_log_callback_t *cb, *cb_next;
1995         int                flushcnt = 0;
1996         xfs_lsn_t          lowest_lsn;
1997         int                ioerrors;    /* counter: iclogs with errors */
1998         int                loopdidcallbacks; /* flag: inner loop did callbacks*/
1999         int                funcdidcallbacks; /* flag: function did callbacks */
2000         int                repeats;     /* for issuing console warnings if
2001                                          * looping too many times */
2002         SPLDECL(s);
2003
2004         s = LOG_LOCK(log);
2005         first_iclog = iclog = log->l_iclog;
2006         ioerrors = 0;
2007         funcdidcallbacks = 0;
2008         repeats = 0;
2009
2010         do {
2011                 /*
2012                  * Scan all iclogs starting with the one pointed to by the
2013                  * log.  Reset this starting point each time the log is
2014                  * unlocked (during callbacks).
2015                  *
2016                  * Keep looping through iclogs until one full pass is made
2017                  * without running any callbacks.
2018                  */
2019                 first_iclog = log->l_iclog;
2020                 iclog = log->l_iclog;
2021                 loopdidcallbacks = 0;
2022                 repeats++;
2023
2024                 do {
2025
2026                         /* skip all iclogs in the ACTIVE & DIRTY states */
2027                         if (iclog->ic_state &
2028                             (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) {
2029                                 iclog = iclog->ic_next;
2030                                 continue;
2031                         }
2032
2033                         /*
2034                          * Between marking a filesystem SHUTDOWN and stopping
2035                          * the log, we do flush all iclogs to disk (if there
2036                          * wasn't a log I/O error). So, we do want things to
2037                          * go smoothly in case of just a SHUTDOWN  w/o a
2038                          * LOG_IO_ERROR.
2039                          */
2040                         if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
2041                                 /*
2042                                  * Can only perform callbacks in order.  Since
2043                                  * this iclog is not in the DONE_SYNC/
2044                                  * DO_CALLBACK state, we skip the rest and
2045                                  * just try to clean up.  If we set our iclog
2046                                  * to DO_CALLBACK, we will not process it when
2047                                  * we retry since a previous iclog is in the
2048                                  * CALLBACK and the state cannot change since
2049                                  * we are holding the LOG_LOCK.
2050                                  */
2051                                 if (!(iclog->ic_state &
2052                                         (XLOG_STATE_DONE_SYNC |
2053                                                  XLOG_STATE_DO_CALLBACK))) {
2054                                         if (ciclog && (ciclog->ic_state ==
2055                                                         XLOG_STATE_DONE_SYNC)) {
2056                                                 ciclog->ic_state = XLOG_STATE_DO_CALLBACK;
2057                                         }
2058                                         break;
2059                                 }
2060                                 /*
2061                                  * We now have an iclog that is in either the
2062                                  * DO_CALLBACK or DONE_SYNC states. The other
2063                                  * states (WANT_SYNC, SYNCING, or CALLBACK were
2064                                  * caught by the above if and are going to
2065                                  * clean (i.e. we aren't doing their callbacks)
2066                                  * see the above if.
2067                                  */
2068
2069                                 /*
2070                                  * We will do one more check here to see if we
2071                                  * have chased our tail around.
2072                                  */
2073
2074                                 lowest_lsn = xlog_get_lowest_lsn(log);
2075                                 if (lowest_lsn && (
2076                                         XFS_LSN_CMP(
2077                                                 lowest_lsn,
2078                                                 INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
2079                                         )<0)) {
2080                                         iclog = iclog->ic_next;
2081                                         continue; /* Leave this iclog for
2082                                                    * another thread */
2083                                 }
2084
2085                                 iclog->ic_state = XLOG_STATE_CALLBACK;
2086
2087                                 LOG_UNLOCK(log, s);
2088
2089                                 /* l_last_sync_lsn field protected by
2090                                  * GRANT_LOCK. Don't worry about iclog's lsn.
2091                                  * No one else can be here except us.
2092                                  */
2093                                 s = GRANT_LOCK(log);
2094                                 ASSERT(XFS_LSN_CMP(
2095                                                 log->l_last_sync_lsn,
2096                                                 INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT)
2097                                         )<=0);
2098                                 log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
2099                                 GRANT_UNLOCK(log, s);
2100
2101                                 /*
2102                                  * Keep processing entries in the callback list
2103                                  * until we come around and it is empty.  We
2104                                  * need to atomically see that the list is
2105                                  * empty and change the state to DIRTY so that
2106                                  * we don't miss any more callbacks being added.
2107                                  */
2108                                 s = LOG_LOCK(log);
2109                         } else {
2110                                 ioerrors++;
2111                         }
2112                         cb = iclog->ic_callback;
2113
2114                         while (cb != 0) {
2115                                 iclog->ic_callback_tail = &(iclog->ic_callback);
2116                                 iclog->ic_callback = NULL;
2117                                 LOG_UNLOCK(log, s);
2118
2119                                 /* perform callbacks in the order given */
2120                                 for (; cb != 0; cb = cb_next) {
2121                                         cb_next = cb->cb_next;
2122                                         cb->cb_func(cb->cb_arg, aborted);
2123                                 }
2124                                 s = LOG_LOCK(log);
2125                                 cb = iclog->ic_callback;
2126                         }
2127
2128                         loopdidcallbacks++;
2129                         funcdidcallbacks++;
2130
2131                         ASSERT(iclog->ic_callback == 0);
2132                         if (!(iclog->ic_state & XLOG_STATE_IOERROR))
2133                                 iclog->ic_state = XLOG_STATE_DIRTY;
2134
2135                         /*
2136                          * Transition from DIRTY to ACTIVE if applicable.
2137                          * NOP if STATE_IOERROR.
2138                          */
2139                         xlog_state_clean_log(log);
2140
2141                         /* wake up threads waiting in xfs_log_force() */
2142                         sv_broadcast(&iclog->ic_forcesema);
2143
2144                         iclog = iclog->ic_next;
2145                 } while (first_iclog != iclog);
2146                 if (repeats && (repeats % 10) == 0) {
2147                         xfs_fs_cmn_err(CE_WARN, log->l_mp,
2148                                 "xlog_state_do_callback: looping %d", repeats);
2149                 }
2150         } while (!ioerrors && loopdidcallbacks);
2151
2152         /*
2153          * make one last gasp attempt to see if iclogs are being left in
2154          * limbo..
2155          */
2156 #ifdef DEBUG
2157         if (funcdidcallbacks) {
2158                 first_iclog = iclog = log->l_iclog;
2159                 do {
2160                         ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK);
2161                         /*
2162                          * Terminate the loop if iclogs are found in states
2163                          * which will cause other threads to clean up iclogs.
2164                          *
2165                          * SYNCING - i/o completion will go through logs
2166                          * DONE_SYNC - interrupt thread should be waiting for
2167                          *              LOG_LOCK
2168                          * IOERROR - give up hope all ye who enter here
2169                          */
2170                         if (iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2171                             iclog->ic_state == XLOG_STATE_SYNCING ||
2172                             iclog->ic_state == XLOG_STATE_DONE_SYNC ||
2173                             iclog->ic_state == XLOG_STATE_IOERROR )
2174                                 break;
2175                         iclog = iclog->ic_next;
2176                 } while (first_iclog != iclog);
2177         }
2178 #endif
2179
2180         if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) {
2181                 flushcnt = log->l_flushcnt;
2182                 log->l_flushcnt = 0;
2183         }
2184         LOG_UNLOCK(log, s);
2185         while (flushcnt--)
2186                 vsema(&log->l_flushsema);
2187 }       /* xlog_state_do_callback */
2188
2189
2190 /*
2191  * Finish transitioning this iclog to the dirty state.
2192  *
2193  * Make sure that we completely execute this routine only when this is
2194  * the last call to the iclog.  There is a good chance that iclog flushes,
2195  * when we reach the end of the physical log, get turned into 2 separate
2196  * calls to bwrite.  Hence, one iclog flush could generate two calls to this
2197  * routine.  By using the reference count bwritecnt, we guarantee that only
2198  * the second completion goes through.
2199  *
2200  * Callbacks could take time, so they are done outside the scope of the
2201  * global state machine log lock.  Assume that the calls to cvsema won't
2202  * take a long time.  At least we know it won't sleep.
2203  */
2204 void
2205 xlog_state_done_syncing(
2206         xlog_in_core_t  *iclog,
2207         int             aborted)
2208 {
2209         xlog_t             *log = iclog->ic_log;
2210         SPLDECL(s);
2211
2212         s = LOG_LOCK(log);
2213
2214         ASSERT(iclog->ic_state == XLOG_STATE_SYNCING ||
2215                iclog->ic_state == XLOG_STATE_IOERROR);
2216         ASSERT(iclog->ic_refcnt == 0);
2217         ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2);
2218
2219
2220         /*
2221          * If we got an error, either on the first buffer, or in the case of
2222          * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
2223          * and none should ever be attempted to be written to disk
2224          * again.
2225          */
2226         if (iclog->ic_state != XLOG_STATE_IOERROR) {
2227                 if (--iclog->ic_bwritecnt == 1) {
2228                         LOG_UNLOCK(log, s);
2229                         return;
2230                 }
2231                 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2232         }
2233
2234         /*
2235          * Someone could be sleeping prior to writing out the next
2236          * iclog buffer, we wake them all, one will get to do the
2237          * I/O, the others get to wait for the result.
2238          */
2239         sv_broadcast(&iclog->ic_writesema);
2240         LOG_UNLOCK(log, s);
2241         xlog_state_do_callback(log, aborted, iclog);    /* also cleans log */
2242 }       /* xlog_state_done_syncing */
2243
2244
2245 /*
2246  * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2247  * sleep.  The flush semaphore is set to the number of in-core buffers and
2248  * decremented around disk syncing.  Therefore, if all buffers are syncing,
2249  * this semaphore will cause new writes to sleep until a sync completes.
2250  * Otherwise, this code just does p() followed by v().  This approximates
2251  * a sleep/wakeup except we can't race.
2252  *
2253  * The in-core logs are used in a circular fashion. They are not used
2254  * out-of-order even when an iclog past the head is free.
2255  *
2256  * return:
2257  *      * log_offset where xlog_write() can start writing into the in-core
2258  *              log's data space.
2259  *      * in-core log pointer to which xlog_write() should write.
2260  *      * boolean indicating this is a continued write to an in-core log.
2261  *              If this is the last write, then the in-core log's offset field
2262  *              needs to be incremented, depending on the amount of data which
2263  *              is copied.
2264  */
2265 int
2266 xlog_state_get_iclog_space(xlog_t         *log,
2267                            int            len,
2268                            xlog_in_core_t **iclogp,
2269                            xlog_ticket_t  *ticket,
2270                            int            *continued_write,
2271                            int            *logoffsetp)
2272 {
2273         SPLDECL(s);
2274         int               log_offset;
2275         xlog_rec_header_t *head;
2276         xlog_in_core_t    *iclog;
2277         int               error;
2278
2279 restart:
2280         s = LOG_LOCK(log);
2281         if (XLOG_FORCED_SHUTDOWN(log)) {
2282                 LOG_UNLOCK(log, s);
2283                 return XFS_ERROR(EIO);
2284         }
2285
2286         iclog = log->l_iclog;
2287         if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) {
2288                 log->l_flushcnt++;
2289                 LOG_UNLOCK(log, s);
2290                 xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
2291                 XFS_STATS_INC(xs_log_noiclogs);
2292                 /* Ensure that log writes happen */
2293                 psema(&log->l_flushsema, PINOD);
2294                 goto restart;
2295         }
2296         ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
2297         head = &iclog->ic_header;
2298
2299         iclog->ic_refcnt++;                     /* prevents sync */
2300         log_offset = iclog->ic_offset;
2301
2302         /* On the 1st write to an iclog, figure out lsn.  This works
2303          * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are
2304          * committing to.  If the offset is set, that's how many blocks
2305          * must be written.
2306          */
2307         if (log_offset == 0) {
2308                 ticket->t_curr_res -= log->l_iclog_hsize;
2309                 XLOG_TIC_ADD_REGION(ticket,
2310                                     log->l_iclog_hsize,
2311                                     XLOG_REG_TYPE_LRHEADER);
2312                 INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
2313                 ASSIGN_LSN(head->h_lsn, log);
2314                 ASSERT(log->l_curr_block >= 0);
2315         }
2316
2317         /* If there is enough room to write everything, then do it.  Otherwise,
2318          * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC
2319          * bit is on, so this will get flushed out.  Don't update ic_offset
2320          * until you know exactly how many bytes get copied.  Therefore, wait
2321          * until later to update ic_offset.
2322          *
2323          * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's
2324          * can fit into remaining data section.
2325          */
2326         if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2327                 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2328
2329                 /* If I'm the only one writing to this iclog, sync it to disk */
2330                 if (iclog->ic_refcnt == 1) {
2331                         LOG_UNLOCK(log, s);
2332                         if ((error = xlog_state_release_iclog(log, iclog)))
2333                                 return (error);
2334                 } else {
2335                         iclog->ic_refcnt--;
2336                         LOG_UNLOCK(log, s);
2337                 }
2338                 goto restart;
2339         }
2340
2341         /* Do we have enough room to write the full amount in the remainder
2342          * of this iclog?  Or must we continue a write on the next iclog and
2343          * mark this iclog as completely taken?  In the case where we switch
2344          * iclogs (to mark it taken), this particular iclog will release/sync
2345          * to disk in xlog_write().
2346          */
2347         if (len <= iclog->ic_size - iclog->ic_offset) {
2348                 *continued_write = 0;
2349                 iclog->ic_offset += len;
2350         } else {
2351                 *continued_write = 1;
2352                 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2353         }
2354         *iclogp = iclog;
2355
2356         ASSERT(iclog->ic_offset <= iclog->ic_size);
2357         LOG_UNLOCK(log, s);
2358
2359         *logoffsetp = log_offset;
2360         return 0;
2361 }       /* xlog_state_get_iclog_space */
2362
2363 /*
2364  * Atomically get the log space required for a log ticket.
2365  *
2366  * Once a ticket gets put onto the reserveq, it will only return after
2367  * the needed reservation is satisfied.
2368  */
2369 STATIC int
2370 xlog_grant_log_space(xlog_t        *log,
2371                      xlog_ticket_t *tic)
2372 {
2373         int              free_bytes;
2374         int              need_bytes;
2375         SPLDECL(s);
2376 #ifdef DEBUG
2377         xfs_lsn_t        tail_lsn;
2378 #endif
2379
2380
2381 #ifdef DEBUG
2382         if (log->l_flags & XLOG_ACTIVE_RECOVERY)
2383                 panic("grant Recovery problem");
2384 #endif
2385
2386         /* Is there space or do we need to sleep? */
2387         s = GRANT_LOCK(log);
2388         xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter");
2389
2390         /* something is already sleeping; insert new transaction at end */
2391         if (log->l_reserve_headq) {
2392                 XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
2393                 xlog_trace_loggrant(log, tic,
2394                                     "xlog_grant_log_space: sleep 1");
2395                 /*
2396                  * Gotta check this before going to sleep, while we're
2397                  * holding the grant lock.
2398                  */
2399                 if (XLOG_FORCED_SHUTDOWN(log))
2400                         goto error_return;
2401
2402                 XFS_STATS_INC(xs_sleep_logspace);
2403                 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
2404                 /*
2405                  * If we got an error, and the filesystem is shutting down,
2406                  * we'll catch it down below. So just continue...
2407                  */
2408                 xlog_trace_loggrant(log, tic,
2409                                     "xlog_grant_log_space: wake 1");
2410                 s = GRANT_LOCK(log);
2411         }
2412         if (tic->t_flags & XFS_LOG_PERM_RESERV)
2413                 need_bytes = tic->t_unit_res*tic->t_ocnt;
2414         else
2415                 need_bytes = tic->t_unit_res;
2416
2417 redo:
2418         if (XLOG_FORCED_SHUTDOWN(log))
2419                 goto error_return;
2420
2421         free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
2422                                      log->l_grant_reserve_bytes);
2423         if (free_bytes < need_bytes) {
2424                 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2425                         XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
2426                 xlog_trace_loggrant(log, tic,
2427                                     "xlog_grant_log_space: sleep 2");
2428                 XFS_STATS_INC(xs_sleep_logspace);
2429                 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
2430
2431                 if (XLOG_FORCED_SHUTDOWN(log)) {
2432                         s = GRANT_LOCK(log);
2433                         goto error_return;
2434                 }
2435
2436                 xlog_trace_loggrant(log, tic,
2437                                     "xlog_grant_log_space: wake 2");
2438                 xlog_grant_push_ail(log->l_mp, need_bytes);
2439                 s = GRANT_LOCK(log);
2440                 goto redo;
2441         } else if (tic->t_flags & XLOG_TIC_IN_Q)
2442                 XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
2443
2444         /* we've got enough space */
2445         XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w');
2446         XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r');
2447 #ifdef DEBUG
2448         tail_lsn = log->l_tail_lsn;
2449         /*
2450          * Check to make sure the grant write head didn't just over lap the
2451          * tail.  If the cycles are the same, we can't be overlapping.
2452          * Otherwise, make sure that the cycles differ by exactly one and
2453          * check the byte count.
2454          */
2455         if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
2456                 ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
2457                 ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
2458         }
2459 #endif
2460         xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit");
2461         xlog_verify_grant_head(log, 1);
2462         GRANT_UNLOCK(log, s);
2463         return 0;
2464
2465  error_return:
2466         if (tic->t_flags & XLOG_TIC_IN_Q)
2467                 XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
2468         xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
2469         /*
2470          * If we are failing, make sure the ticket doesn't have any
2471          * current reservations. We don't want to add this back when
2472          * the ticket/transaction gets cancelled.
2473          */
2474         tic->t_curr_res = 0;
2475         tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
2476         GRANT_UNLOCK(log, s);
2477         return XFS_ERROR(EIO);
2478 }       /* xlog_grant_log_space */
2479
2480
2481 /*
2482  * Replenish the byte reservation required by moving the grant write head.
2483  *
2484  *
2485  */
2486 STATIC int
2487 xlog_regrant_write_log_space(xlog_t        *log,
2488                              xlog_ticket_t *tic)
2489 {
2490         SPLDECL(s);
2491         int             free_bytes, need_bytes;
2492         xlog_ticket_t   *ntic;
2493 #ifdef DEBUG
2494         xfs_lsn_t       tail_lsn;
2495 #endif
2496
2497         tic->t_curr_res = tic->t_unit_res;
2498         XLOG_TIC_RESET_RES(tic);
2499
2500         if (tic->t_cnt > 0)
2501                 return (0);
2502
2503 #ifdef DEBUG
2504         if (log->l_flags & XLOG_ACTIVE_RECOVERY)
2505                 panic("regrant Recovery problem");
2506 #endif
2507
2508         s = GRANT_LOCK(log);
2509         xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter");
2510
2511         if (XLOG_FORCED_SHUTDOWN(log))
2512                 goto error_return;
2513
2514         /* If there are other waiters on the queue then give them a
2515          * chance at logspace before us. Wake up the first waiters,
2516          * if we do not wake up all the waiters then go to sleep waiting
2517          * for more free space, otherwise try to get some space for
2518          * this transaction.
2519          */
2520
2521         if ((ntic = log->l_write_headq)) {
2522                 free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
2523                                              log->l_grant_write_bytes);
2524                 do {
2525                         ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
2526
2527                         if (free_bytes < ntic->t_unit_res)
2528                                 break;
2529                         free_bytes -= ntic->t_unit_res;
2530                         sv_signal(&ntic->t_sema);
2531                         ntic = ntic->t_next;
2532                 } while (ntic != log->l_write_headq);
2533
2534                 if (ntic != log->l_write_headq) {
2535                         if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2536                                 XLOG_INS_TICKETQ(log->l_write_headq, tic);
2537
2538                         xlog_trace_loggrant(log, tic,
2539                                     "xlog_regrant_write_log_space: sleep 1");
2540                         XFS_STATS_INC(xs_sleep_logspace);
2541                         sv_wait(&tic->t_sema, PINOD|PLTWAIT,
2542                                 &log->l_grant_lock, s);
2543
2544                         /* If we're shutting down, this tic is already
2545                          * off the queue */
2546                         if (XLOG_FORCED_SHUTDOWN(log)) {
2547                                 s = GRANT_LOCK(log);
2548                                 goto error_return;
2549                         }
2550
2551                         xlog_trace_loggrant(log, tic,
2552                                     "xlog_regrant_write_log_space: wake 1");
2553                         xlog_grant_push_ail(log->l_mp, tic->t_unit_res);
2554                         s = GRANT_LOCK(log);
2555                 }
2556         }
2557
2558         need_bytes = tic->t_unit_res;
2559
2560 redo:
2561         if (XLOG_FORCED_SHUTDOWN(log))
2562                 goto error_return;
2563
2564         free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
2565                                      log->l_grant_write_bytes);
2566         if (free_bytes < need_bytes) {
2567                 if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
2568                         XLOG_INS_TICKETQ(log->l_write_headq, tic);
2569                 XFS_STATS_INC(xs_sleep_logspace);
2570                 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
2571
2572                 /* If we're shutting down, this tic is already off the queue */
2573                 if (XLOG_FORCED_SHUTDOWN(log)) {
2574                         s = GRANT_LOCK(log);
2575                         goto error_return;
2576                 }
2577
2578                 xlog_trace_loggrant(log, tic,
2579                                     "xlog_regrant_write_log_space: wake 2");
2580                 xlog_grant_push_ail(log->l_mp, need_bytes);
2581                 s = GRANT_LOCK(log);
2582                 goto redo;
2583         } else if (tic->t_flags & XLOG_TIC_IN_Q)
2584                 XLOG_DEL_TICKETQ(log->l_write_headq, tic);
2585
2586         XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); /* we've got enough space */
2587 #ifdef DEBUG
2588         tail_lsn = log->l_tail_lsn;
2589         if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
2590                 ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
2591                 ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
2592         }
2593 #endif
2594
2595         xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit");
2596         xlog_verify_grant_head(log, 1);
2597         GRANT_UNLOCK(log, s);
2598         return (0);
2599
2600
2601  error_return:
2602         if (tic->t_flags & XLOG_TIC_IN_Q)
2603                 XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
2604         xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret");
2605         /*
2606          * If we are failing, make sure the ticket doesn't have any
2607          * current reservations. We don't want to add this back when
2608          * the ticket/transaction gets cancelled.
2609          */
2610         tic->t_curr_res = 0;
2611         tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
2612         GRANT_UNLOCK(log, s);
2613         return XFS_ERROR(EIO);
2614 }       /* xlog_regrant_write_log_space */
2615
2616
2617 /* The first cnt-1 times through here we don't need to
2618  * move the grant write head because the permanent
2619  * reservation has reserved cnt times the unit amount.
2620  * Release part of current permanent unit reservation and
2621  * reset current reservation to be one units worth.  Also
2622  * move grant reservation head forward.
2623  */
2624 STATIC void
2625 xlog_regrant_reserve_log_space(xlog_t        *log,
2626                                xlog_ticket_t *ticket)
2627 {
2628         SPLDECL(s);
2629
2630         xlog_trace_loggrant(log, ticket,
2631                             "xlog_regrant_reserve_log_space: enter");
2632         if (ticket->t_cnt > 0)
2633                 ticket->t_cnt--;
2634
2635         s = GRANT_LOCK(log);
2636         XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
2637         XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
2638         ticket->t_curr_res = ticket->t_unit_res;
2639         XLOG_TIC_RESET_RES(ticket);
2640         xlog_trace_loggrant(log, ticket,
2641                             "xlog_regrant_reserve_log_space: sub current res");
2642         xlog_verify_grant_head(log, 1);
2643
2644         /* just return if we still have some of the pre-reserved space */
2645         if (ticket->t_cnt > 0) {
2646                 GRANT_UNLOCK(log, s);
2647                 return;
2648         }
2649
2650         XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r');
2651         xlog_trace_loggrant(log, ticket,
2652                             "xlog_regrant_reserve_log_space: exit");
2653         xlog_verify_grant_head(log, 0);
2654         GRANT_UNLOCK(log, s);
2655         ticket->t_curr_res = ticket->t_unit_res;
2656         XLOG_TIC_RESET_RES(ticket);
2657 }       /* xlog_regrant_reserve_log_space */
2658
2659
2660 /*
2661  * Give back the space left from a reservation.
2662  *
2663  * All the information we need to make a correct determination of space left
2664  * is present.  For non-permanent reservations, things are quite easy.  The
2665  * count should have been decremented to zero.  We only need to deal with the
2666  * space remaining in the current reservation part of the ticket.  If the
2667  * ticket contains a permanent reservation, there may be left over space which
2668  * needs to be released.  A count of N means that N-1 refills of the current
2669  * reservation can be done before we need to ask for more space.  The first
2670  * one goes to fill up the first current reservation.  Once we run out of
2671  * space, the count will stay at zero and the only space remaining will be
2672  * in the current reservation field.
2673  */
2674 STATIC void
2675 xlog_ungrant_log_space(xlog_t        *log,
2676                        xlog_ticket_t *ticket)
2677 {
2678         SPLDECL(s);
2679
2680         if (ticket->t_cnt > 0)
2681                 ticket->t_cnt--;
2682
2683         s = GRANT_LOCK(log);
2684         xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter");
2685
2686         XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
2687         XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
2688
2689         xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current");
2690
2691         /* If this is a permanent reservation ticket, we may be able to free
2692          * up more space based on the remaining count.
2693          */
2694         if (ticket->t_cnt > 0) {
2695                 ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
2696                 XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'w');
2697                 XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'r');
2698         }
2699
2700         xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
2701         xlog_verify_grant_head(log, 1);
2702         GRANT_UNLOCK(log, s);
2703         xfs_log_move_tail(log->l_mp, 1);
2704 }       /* xlog_ungrant_log_space */
2705
2706
2707 /*
2708  * Atomically put back used ticket.
2709  */
2710 void
2711 xlog_state_put_ticket(xlog_t        *log,
2712                       xlog_ticket_t *tic)
2713 {
2714         unsigned long s;
2715
2716         s = LOG_LOCK(log);
2717         xlog_ticket_put(log, tic);
2718         LOG_UNLOCK(log, s);
2719 }       /* xlog_state_put_ticket */
2720
2721 /*
2722  * Flush iclog to disk if this is the last reference to the given iclog and
2723  * the WANT_SYNC bit is set.
2724  *
2725  * When this function is entered, the iclog is not necessarily in the
2726  * WANT_SYNC state.  It may be sitting around waiting to get filled.
2727  *
2728  *
2729  */
2730 int
2731 xlog_state_release_iclog(xlog_t         *log,
2732                          xlog_in_core_t *iclog)
2733 {
2734         SPLDECL(s);
2735         int             sync = 0;       /* do we sync? */
2736
2737         xlog_assign_tail_lsn(log->l_mp);
2738
2739         s = LOG_LOCK(log);
2740
2741         if (iclog->ic_state & XLOG_STATE_IOERROR) {
2742                 LOG_UNLOCK(log, s);
2743                 return XFS_ERROR(EIO);
2744         }
2745
2746         ASSERT(iclog->ic_refcnt > 0);
2747         ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE ||
2748                iclog->ic_state == XLOG_STATE_WANT_SYNC);
2749
2750         if (--iclog->ic_refcnt == 0 &&
2751             iclog->ic_state == XLOG_STATE_WANT_SYNC) {
2752                 sync++;
2753                 iclog->ic_state = XLOG_STATE_SYNCING;
2754                 INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn);
2755                 xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
2756                 /* cycle incremented when incrementing curr_block */
2757         }
2758
2759         LOG_UNLOCK(log, s);
2760
2761         /*
2762          * We let the log lock go, so it's possible that we hit a log I/O
2763          * error or someother SHUTDOWN condition that marks the iclog
2764          * as XLOG_STATE_IOERROR before the bwrite. However, we know that
2765          * this iclog has consistent data, so we ignore IOERROR
2766          * flags after this point.
2767          */
2768         if (sync) {
2769                 return xlog_sync(log, iclog);
2770         }
2771         return (0);
2772
2773 }       /* xlog_state_release_iclog */
2774
2775
2776 /*
2777  * This routine will mark the current iclog in the ring as WANT_SYNC
2778  * and move the current iclog pointer to the next iclog in the ring.
2779  * When this routine is called from xlog_state_get_iclog_space(), the
2780  * exact size of the iclog has not yet been determined.  All we know is
2781  * that every data block.  We have run out of space in this log record.
2782  */
2783 STATIC void
2784 xlog_state_switch_iclogs(xlog_t         *log,
2785                          xlog_in_core_t *iclog,
2786                          int            eventual_size)
2787 {
2788         ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
2789         if (!eventual_size)
2790                 eventual_size = iclog->ic_offset;
2791         iclog->ic_state = XLOG_STATE_WANT_SYNC;
2792         INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block);
2793         log->l_prev_block = log->l_curr_block;
2794         log->l_prev_cycle = log->l_curr_cycle;
2795
2796         /* roll log?: ic_offset changed later */
2797         log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
2798
2799         /* Round up to next log-sunit */
2800         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
2801             log->l_mp->m_sb.sb_logsunit > 1) {
2802                 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
2803                 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
2804         }
2805
2806         if (log->l_curr_block >= log->l_logBBsize) {
2807                 log->l_curr_cycle++;
2808                 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
2809                         log->l_curr_cycle++;
2810                 log->l_curr_block -= log->l_logBBsize;
2811                 ASSERT(log->l_curr_block >= 0);
2812         }
2813         ASSERT(iclog == log->l_iclog);
2814         log->l_iclog = iclog->ic_next;
2815 }       /* xlog_state_switch_iclogs */
2816
2817
2818 /*
2819  * Write out all data in the in-core log as of this exact moment in time.
2820  *
2821  * Data may be written to the in-core log during this call.  However,
2822  * we don't guarantee this data will be written out.  A change from past
2823  * implementation means this routine will *not* write out zero length LRs.
2824  *
2825  * Basically, we try and perform an intelligent scan of the in-core logs.
2826  * If we determine there is no flushable data, we just return.  There is no
2827  * flushable data if:
2828  *
2829  *      1. the current iclog is active and has no data; the previous iclog
2830  *              is in the active or dirty state.
2831  *      2. the current iclog is drity, and the previous iclog is in the
2832  *              active or dirty state.
2833  *
2834  * We may sleep (call psema) if:
2835  *
2836  *      1. the current iclog is not in the active nor dirty state.
2837  *      2. the current iclog dirty, and the previous iclog is not in the
2838  *              active nor dirty state.
2839  *      3. the current iclog is active, and there is another thread writing
2840  *              to this particular iclog.
2841  *      4. a) the current iclog is active and has no other writers
2842  *         b) when we return from flushing out this iclog, it is still
2843  *              not in the active nor dirty state.
2844  */
2845 STATIC int
2846 xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
2847 {
2848         xlog_in_core_t  *iclog;
2849         xfs_lsn_t       lsn;
2850         SPLDECL(s);
2851
2852         s = LOG_LOCK(log);
2853
2854         iclog = log->l_iclog;
2855         if (iclog->ic_state & XLOG_STATE_IOERROR) {
2856                 LOG_UNLOCK(log, s);
2857                 return XFS_ERROR(EIO);
2858         }
2859
2860         /* If the head iclog is not active nor dirty, we just attach
2861          * ourselves to the head and go to sleep.
2862          */
2863         if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2864             iclog->ic_state == XLOG_STATE_DIRTY) {
2865                 /*
2866                  * If the head is dirty or (active and empty), then
2867                  * we need to look at the previous iclog.  If the previous
2868                  * iclog is active or dirty we are done.  There is nothing
2869                  * to sync out.  Otherwise, we attach ourselves to the
2870                  * previous iclog and go to sleep.
2871                  */
2872                 if (iclog->ic_state == XLOG_STATE_DIRTY ||
2873                     (iclog->ic_refcnt == 0 && iclog->ic_offset == 0)) {
2874                         iclog = iclog->ic_prev;
2875                         if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2876                             iclog->ic_state == XLOG_STATE_DIRTY)
2877                                 goto no_sleep;
2878                         else
2879                                 goto maybe_sleep;
2880                 } else {
2881                         if (iclog->ic_refcnt == 0) {
2882                                 /* We are the only one with access to this
2883                                  * iclog.  Flush it out now.  There should
2884                                  * be a roundoff of zero to show that someone
2885                                  * has already taken care of the roundoff from
2886                                  * the previous sync.
2887                                  */
2888                                 iclog->ic_refcnt++;
2889                                 lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
2890                                 xlog_state_switch_iclogs(log, iclog, 0);
2891                                 LOG_UNLOCK(log, s);
2892
2893                                 if (xlog_state_release_iclog(log, iclog))
2894                                         return XFS_ERROR(EIO);
2895                                 *log_flushed = 1;
2896                                 s = LOG_LOCK(log);
2897                                 if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn &&
2898                                     iclog->ic_state != XLOG_STATE_DIRTY)
2899                                         goto maybe_sleep;
2900                                 else
2901                                         goto no_sleep;
2902                         } else {
2903                                 /* Someone else is writing to this iclog.
2904                                  * Use its call to flush out the data.  However,
2905                                  * the other thread may not force out this LR,
2906                                  * so we mark it WANT_SYNC.
2907                                  */
2908                                 xlog_state_switch_iclogs(log, iclog, 0);
2909                                 goto maybe_sleep;
2910                         }
2911                 }
2912         }
2913
2914         /* By the time we come around again, the iclog could've been filled
2915          * which would give it another lsn.  If we have a new lsn, just
2916          * return because the relevant data has been flushed.
2917          */
2918 maybe_sleep:
2919         if (flags & XFS_LOG_SYNC) {
2920                 /*
2921                  * We must check if we're shutting down here, before
2922                  * we wait, while we're holding the LOG_LOCK.
2923                  * Then we check again after waking up, in case our
2924                  * sleep was disturbed by a bad news.
2925                  */
2926                 if (iclog->ic_state & XLOG_STATE_IOERROR) {
2927                         LOG_UNLOCK(log, s);
2928                         return XFS_ERROR(EIO);
2929                 }
2930                 XFS_STATS_INC(xs_log_force_sleep);
2931                 sv_wait(&iclog->ic_forcesema, PINOD, &log->l_icloglock, s);
2932                 /*
2933                  * No need to grab the log lock here since we're
2934                  * only deciding whether or not to return EIO
2935                  * and the memory read should be atomic.
2936                  */
2937                 if (iclog->ic_state & XLOG_STATE_IOERROR)
2938                         return XFS_ERROR(EIO);
2939                 *log_flushed = 1;
2940
2941         } else {
2942
2943 no_sleep:
2944                 LOG_UNLOCK(log, s);
2945         }
2946         return 0;
2947 }       /* xlog_state_sync_all */
2948
2949
2950 /*
2951  * Used by code which implements synchronous log forces.
2952  *
2953  * Find in-core log with lsn.
2954  *      If it is in the DIRTY state, just return.
2955  *      If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
2956  *              state and go to sleep or return.
2957  *      If it is in any other state, go to sleep or return.
2958  *
2959  * If filesystem activity goes to zero, the iclog will get flushed only by
2960  * bdflush().
2961  */
2962 int
2963 xlog_state_sync(xlog_t    *log,
2964                 xfs_lsn_t lsn,
2965                 uint      flags,
2966                 int       *log_flushed)
2967 {
2968     xlog_in_core_t      *iclog;
2969     int                 already_slept = 0;
2970     SPLDECL(s);
2971
2972
2973 try_again:
2974     s = LOG_LOCK(log);
2975     iclog = log->l_iclog;
2976
2977     if (iclog->ic_state & XLOG_STATE_IOERROR) {
2978             LOG_UNLOCK(log, s);
2979             return XFS_ERROR(EIO);
2980     }
2981
2982     do {
2983         if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) {
2984             iclog = iclog->ic_next;
2985             continue;
2986         }
2987
2988         if (iclog->ic_state == XLOG_STATE_DIRTY) {
2989                 LOG_UNLOCK(log, s);
2990                 return 0;
2991         }
2992
2993         if (iclog->ic_state == XLOG_STATE_ACTIVE) {
2994                 /*
2995                  * We sleep here if we haven't already slept (e.g.
2996                  * this is the first time we've looked at the correct
2997                  * iclog buf) and the buffer before us is going to
2998                  * be sync'ed. The reason for this is that if we
2999                  * are doing sync transactions here, by waiting for
3000                  * the previous I/O to complete, we can allow a few
3001                  * more transactions into this iclog before we close
3002                  * it down.
3003                  *
3004                  * Otherwise, we mark the buffer WANT_SYNC, and bump
3005                  * up the refcnt so we can release the log (which drops
3006                  * the ref count).  The state switch keeps new transaction
3007                  * commits from using this buffer.  When the current commits
3008                  * finish writing into the buffer, the refcount will drop to
3009                  * zero and the buffer will go out then.
3010                  */
3011                 if (!already_slept &&
3012                     (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC |
3013                                                  XLOG_STATE_SYNCING))) {
3014                         ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
3015                         XFS_STATS_INC(xs_log_force_sleep);
3016                         sv_wait(&iclog->ic_prev->ic_writesema, PSWP,
3017                                 &log->l_icloglock, s);
3018                         *log_flushed = 1;
3019                         already_slept = 1;
3020                         goto try_again;
3021                 } else {
3022                         iclog->ic_refcnt++;
3023                         xlog_state_switch_iclogs(log, iclog, 0);
3024                         LOG_UNLOCK(log, s);
3025                         if (xlog_state_release_iclog(log, iclog))
3026                                 return XFS_ERROR(EIO);
3027                         *log_flushed = 1;
3028                         s = LOG_LOCK(log);
3029                 }
3030         }
3031
3032         if ((flags & XFS_LOG_SYNC) && /* sleep */
3033             !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
3034
3035                 /*
3036                  * Don't wait on the forcesema if we know that we've
3037                  * gotten a log write error.
3038                  */
3039                 if (iclog->ic_state & XLOG_STATE_IOERROR) {
3040                         LOG_UNLOCK(log, s);
3041                         return XFS_ERROR(EIO);
3042                 }
3043                 XFS_STATS_INC(xs_log_force_sleep);
3044                 sv_wait(&iclog->ic_forcesema, PSWP, &log->l_icloglock, s);
3045                 /*
3046                  * No need to grab the log lock here since we're
3047                  * only deciding whether or not to return EIO
3048                  * and the memory read should be atomic.
3049                  */
3050                 if (iclog->ic_state & XLOG_STATE_IOERROR)
3051                         return XFS_ERROR(EIO);
3052                 *log_flushed = 1;
3053         } else {                /* just return */
3054                 LOG_UNLOCK(log, s);
3055         }
3056         return 0;
3057
3058     } while (iclog != log->l_iclog);
3059
3060     LOG_UNLOCK(log, s);
3061     return (0);
3062 }       /* xlog_state_sync */
3063
3064
3065 /*
3066  * Called when we want to mark the current iclog as being ready to sync to
3067  * disk.
3068  */
3069 void
3070 xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
3071 {
3072         SPLDECL(s);
3073
3074         s = LOG_LOCK(log);
3075
3076         if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3077                 xlog_state_switch_iclogs(log, iclog, 0);
3078         } else {
3079                 ASSERT(iclog->ic_state &
3080                         (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR));
3081         }
3082
3083         LOG_UNLOCK(log, s);
3084 }       /* xlog_state_want_sync */
3085
3086
3087
3088 /*****************************************************************************
3089  *
3090  *              TICKET functions
3091  *
3092  *****************************************************************************
3093  */
3094
3095 /*
3096  *      Algorithm doesn't take into account page size. ;-(
3097  */
3098 STATIC void
3099 xlog_state_ticket_alloc(xlog_t *log)
3100 {
3101         xlog_ticket_t   *t_list;
3102         xlog_ticket_t   *next;
3103         xfs_caddr_t     buf;
3104         uint            i = (NBPP / sizeof(xlog_ticket_t)) - 2;
3105         SPLDECL(s);
3106
3107         /*
3108          * The kmem_zalloc may sleep, so we shouldn't be holding the
3109          * global lock.  XXXmiken: may want to use zone allocator.
3110          */
3111         buf = (xfs_caddr_t) kmem_zalloc(NBPP, KM_SLEEP);
3112
3113         s = LOG_LOCK(log);
3114
3115         /* Attach 1st ticket to Q, so we can keep track of allocated memory */
3116         t_list = (xlog_ticket_t *)buf;
3117         t_list->t_next = log->l_unmount_free;
3118         log->l_unmount_free = t_list++;
3119         log->l_ticket_cnt++;
3120         log->l_ticket_tcnt++;
3121
3122         /* Next ticket becomes first ticket attached to ticket free list */
3123         if (log->l_freelist != NULL) {
3124                 ASSERT(log->l_tail != NULL);
3125                 log->l_tail->t_next = t_list;
3126         } else {
3127                 log->l_freelist = t_list;
3128         }
3129         log->l_ticket_cnt++;
3130         log->l_ticket_tcnt++;
3131
3132         /* Cycle through rest of alloc'ed memory, building up free Q */
3133         for ( ; i > 0; i--) {
3134                 next = t_list + 1;
3135                 t_list->t_next = next;
3136                 t_list = next;
3137                 log->l_ticket_cnt++;
3138                 log->l_ticket_tcnt++;
3139         }
3140         t_list->t_next = NULL;
3141         log->l_tail = t_list;
3142         LOG_UNLOCK(log, s);
3143 }       /* xlog_state_ticket_alloc */
3144
3145
3146 /*
3147  * Put ticket into free list
3148  *
3149  * Assumption: log lock is held around this call.
3150  */
3151 STATIC void
3152 xlog_ticket_put(xlog_t          *log,
3153                 xlog_ticket_t   *ticket)
3154 {
3155         sv_destroy(&ticket->t_sema);
3156
3157         /*
3158          * Don't think caching will make that much difference.  It's
3159          * more important to make debug easier.
3160          */
3161 #if 0
3162         /* real code will want to use LIFO for caching */
3163         ticket->t_next = log->l_freelist;
3164         log->l_freelist = ticket;
3165         /* no need to clear fields */
3166 #else
3167         /* When we debug, it is easier if tickets are cycled */
3168         ticket->t_next     = NULL;
3169         if (log->l_tail != 0) {
3170                 log->l_tail->t_next = ticket;
3171         } else {
3172                 ASSERT(log->l_freelist == 0);
3173                 log->l_freelist = ticket;
3174         }
3175         log->l_tail         = ticket;
3176 #endif /* DEBUG */
3177         log->l_ticket_cnt++;
3178 }       /* xlog_ticket_put */
3179
3180
3181 /*
3182  * Grab ticket off freelist or allocation some more
3183  */
3184 xlog_ticket_t *
3185 xlog_ticket_get(xlog_t          *log,
3186                 int             unit_bytes,
3187                 int             cnt,
3188                 char            client,
3189                 uint            xflags)
3190 {
3191         xlog_ticket_t   *tic;
3192         uint            num_headers;
3193         SPLDECL(s);
3194
3195  alloc:
3196         if (log->l_freelist == NULL)
3197                 xlog_state_ticket_alloc(log);           /* potentially sleep */
3198
3199         s = LOG_LOCK(log);
3200         if (log->l_freelist == NULL) {
3201                 LOG_UNLOCK(log, s);
3202                 goto alloc;
3203         }
3204         tic             = log->l_freelist;
3205         log->l_freelist = tic->t_next;
3206         if (log->l_freelist == NULL)
3207                 log->l_tail = NULL;
3208         log->l_ticket_cnt--;
3209         LOG_UNLOCK(log, s);
3210
3211         /*
3212          * Permanent reservations have up to 'cnt'-1 active log operations
3213          * in the log.  A unit in this case is the amount of space for one
3214          * of these log operations.  Normal reservations have a cnt of 1
3215          * and their unit amount is the total amount of space required.
3216          *
3217          * The following lines of code account for non-transaction data
3218          * which occupy space in the on-disk log.
3219          *
3220          * Normal form of a transaction is:
3221          * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3222          * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3223          *
3224          * We need to account for all the leadup data and trailer data
3225          * around the transaction data.
3226          * And then we need to account for the worst case in terms of using
3227          * more space.
3228          * The worst case will happen if:
3229          * - the placement of the transaction happens to be such that the
3230          *   roundoff is at its maximum
3231          * - the transaction data is synced before the commit record is synced
3232          *   i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3233          *   Therefore the commit record is in its own Log Record.
3234          *   This can happen as the commit record is called with its
3235          *   own region to xlog_write().
3236          *   This then means that in the worst case, roundoff can happen for
3237          *   the commit-rec as well.
3238          *   The commit-rec is smaller than padding in this scenario and so it is
3239          *   not added separately.
3240          */
3241
3242         /* for trans header */
3243         unit_bytes += sizeof(xlog_op_header_t);
3244         unit_bytes += sizeof(xfs_trans_header_t);
3245
3246         /* for start-rec */
3247         unit_bytes += sizeof(xlog_op_header_t);
3248
3249         /* for LR headers */
3250         num_headers = ((unit_bytes + log->l_iclog_size-1) >> log->l_iclog_size_log);
3251         unit_bytes += log->l_iclog_hsize * num_headers;
3252
3253         /* for commit-rec LR header - note: padding will subsume the ophdr */
3254         unit_bytes += log->l_iclog_hsize;
3255
3256         /* for split-recs - ophdrs added when data split over LRs */
3257         unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3258
3259         /* for roundoff padding for transaction data and one for commit record */
3260         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
3261             log->l_mp->m_sb.sb_logsunit > 1) {
3262                 /* log su roundoff */
3263                 unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
3264         } else {
3265                 /* BB roundoff */
3266                 unit_bytes += 2*BBSIZE;
3267         }
3268
3269         tic->t_unit_res         = unit_bytes;
3270         tic->t_curr_res         = unit_bytes;
3271         tic->t_cnt              = cnt;
3272         tic->t_ocnt             = cnt;
3273         tic->t_tid              = (xlog_tid_t)((__psint_t)tic & 0xffffffff);
3274         tic->t_clientid         = client;
3275         tic->t_flags            = XLOG_TIC_INITED;
3276         tic->t_trans_type       = 0;
3277         if (xflags & XFS_LOG_PERM_RESERV)
3278                 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3279         sv_init(&(tic->t_sema), SV_DEFAULT, "logtick");
3280
3281         XLOG_TIC_RESET_RES(tic);
3282
3283         return tic;
3284 }       /* xlog_ticket_get */
3285
3286
3287 /******************************************************************************
3288  *
3289  *              Log debug routines
3290  *
3291  ******************************************************************************
3292  */
3293 #if defined(DEBUG)
3294 /*
3295  * Make sure that the destination ptr is within the valid data region of
3296  * one of the iclogs.  This uses backup pointers stored in a different
3297  * part of the log in case we trash the log structure.
3298  */
3299 void
3300 xlog_verify_dest_ptr(xlog_t     *log,
3301                      __psint_t  ptr)
3302 {
3303         int i;
3304         int good_ptr = 0;
3305
3306         for (i=0; i < log->l_iclog_bufs; i++) {
3307                 if (ptr >= (__psint_t)log->l_iclog_bak[i] &&
3308                     ptr <= (__psint_t)log->l_iclog_bak[i]+log->l_iclog_size)
3309                         good_ptr++;
3310         }
3311         if (! good_ptr)
3312                 xlog_panic("xlog_verify_dest_ptr: invalid ptr");
3313 }       /* xlog_verify_dest_ptr */
3314
3315 STATIC void
3316 xlog_verify_grant_head(xlog_t *log, int equals)
3317 {
3318     if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) {
3319         if (equals)
3320             ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes);
3321         else
3322             ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes);
3323     } else {
3324         ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle);
3325         ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes);
3326     }
3327 }       /* xlog_verify_grant_head */
3328
3329 /* check if it will fit */
3330 STATIC void
3331 xlog_verify_tail_lsn(xlog_t         *log,
3332                      xlog_in_core_t *iclog,
3333                      xfs_lsn_t      tail_lsn)
3334 {
3335     int blocks;
3336
3337     if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3338         blocks =
3339             log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3340         if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3341             xlog_panic("xlog_verify_tail_lsn: ran out of log space");
3342     } else {
3343         ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3344
3345         if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3346             xlog_panic("xlog_verify_tail_lsn: tail wrapped");
3347
3348         blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3349         if (blocks < BTOBB(iclog->ic_offset) + 1)
3350             xlog_panic("xlog_verify_tail_lsn: ran out of log space");
3351     }
3352 }       /* xlog_verify_tail_lsn */
3353
3354 /*
3355  * Perform a number of checks on the iclog before writing to disk.
3356  *
3357  * 1. Make sure the iclogs are still circular
3358  * 2. Make sure we have a good magic number
3359  * 3. Make sure we don't have magic numbers in the data
3360  * 4. Check fields of each log operation header for:
3361  *      A. Valid client identifier
3362  *      B. tid ptr value falls in valid ptr space (user space code)
3363  *      C. Length in log record header is correct according to the
3364  *              individual operation headers within record.
3365  * 5. When a bwrite will occur within 5 blocks of the front of the physical
3366  *      log, check the preceding blocks of the physical log to make sure all
3367  *      the cycle numbers agree with the current cycle number.
3368  */
3369 STATIC void
3370 xlog_verify_iclog(xlog_t         *log,
3371                   xlog_in_core_t *iclog,
3372                   int            count,
3373                   boolean_t      syncing)
3374 {
3375         xlog_op_header_t        *ophead;
3376         xlog_in_core_t          *icptr;
3377         xlog_in_core_2_t        *xhdr;
3378         xfs_caddr_t             ptr;
3379         xfs_caddr_t             base_ptr;
3380         __psint_t               field_offset;
3381         __uint8_t               clientid;
3382         int                     len, i, j, k, op_len;
3383         int                     idx;
3384         SPLDECL(s);
3385
3386         /* check validity of iclog pointers */
3387         s = LOG_LOCK(log);
3388         icptr = log->l_iclog;
3389         for (i=0; i < log->l_iclog_bufs; i++) {
3390                 if (icptr == 0)
3391                         xlog_panic("xlog_verify_iclog: invalid ptr");
3392                 icptr = icptr->ic_next;
3393         }
3394         if (icptr != log->l_iclog)
3395                 xlog_panic("xlog_verify_iclog: corrupt iclog ring");
3396         LOG_UNLOCK(log, s);
3397
3398         /* check log magic numbers */
3399         ptr = (xfs_caddr_t) &(iclog->ic_header);
3400         if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM)
3401                 xlog_panic("xlog_verify_iclog: invalid magic num");
3402
3403         for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count;
3404              ptr += BBSIZE) {
3405                 if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM)
3406                         xlog_panic("xlog_verify_iclog: unexpected magic num");
3407         }
3408
3409         /* check fields */
3410         len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT);
3411         ptr = iclog->ic_datap;
3412         base_ptr = ptr;
3413         ophead = (xlog_op_header_t *)ptr;
3414         xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
3415         for (i = 0; i < len; i++) {
3416                 ophead = (xlog_op_header_t *)ptr;
3417
3418                 /* clientid is only 1 byte */
3419                 field_offset = (__psint_t)
3420                                ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
3421                 if (syncing == B_FALSE || (field_offset & 0x1ff)) {
3422                         clientid = ophead->oh_clientid;
3423                 } else {
3424                         idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);
3425                         if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3426                                 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3427                                 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3428                                 clientid = GET_CLIENT_ID(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
3429                         } else {
3430                                 clientid = GET_CLIENT_ID(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
3431                         }
3432                 }
3433                 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG)
3434                         cmn_err(CE_WARN, "xlog_verify_iclog: "
3435                                 "invalid clientid %d op 0x%p offset 0x%lx",
3436                                 clientid, ophead, (unsigned long)field_offset);
3437
3438                 /* check length */
3439                 field_offset = (__psint_t)
3440                                ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
3441                 if (syncing == B_FALSE || (field_offset & 0x1ff)) {
3442                         op_len = INT_GET(ophead->oh_len, ARCH_CONVERT);
3443                 } else {
3444                         idx = BTOBBT((__psint_t)&ophead->oh_len -
3445                                     (__psint_t)iclog->ic_datap);
3446                         if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) {
3447                                 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3448                                 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3449                                 op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT);
3450                         } else {
3451                                 op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT);
3452                         }
3453                 }
3454                 ptr += sizeof(xlog_op_header_t) + op_len;
3455         }
3456 }       /* xlog_verify_iclog */
3457 #endif
3458
3459 /*
3460  * Mark all iclogs IOERROR. LOG_LOCK is held by the caller.
3461  */
3462 STATIC int
3463 xlog_state_ioerror(
3464         xlog_t  *log)
3465 {
3466         xlog_in_core_t  *iclog, *ic;
3467
3468         iclog = log->l_iclog;
3469         if (! (iclog->ic_state & XLOG_STATE_IOERROR)) {
3470                 /*
3471                  * Mark all the incore logs IOERROR.
3472                  * From now on, no log flushes will result.
3473                  */
3474                 ic = iclog;
3475                 do {
3476                         ic->ic_state = XLOG_STATE_IOERROR;
3477                         ic = ic->ic_next;
3478                 } while (ic != iclog);
3479                 return (0);
3480         }
3481         /*
3482          * Return non-zero, if state transition has already happened.
3483          */
3484         return (1);
3485 }
3486
3487 /*
3488  * This is called from xfs_force_shutdown, when we're forcibly
3489  * shutting down the filesystem, typically because of an IO error.
3490  * Our main objectives here are to make sure that:
3491  *      a. the filesystem gets marked 'SHUTDOWN' for all interested
3492  *         parties to find out, 'atomically'.
3493  *      b. those who're sleeping on log reservations, pinned objects and
3494  *          other resources get woken up, and be told the bad news.
3495  *      c. nothing new gets queued up after (a) and (b) are done.
3496  *      d. if !logerror, flush the iclogs to disk, then seal them off
3497  *         for business.
3498  */
3499 int
3500 xfs_log_force_umount(
3501         struct xfs_mount        *mp,
3502         int                     logerror)
3503 {
3504         xlog_ticket_t   *tic;
3505         xlog_t          *log;
3506         int             retval;
3507         int             dummy;
3508         SPLDECL(s);
3509         SPLDECL(s2);
3510
3511         log = mp->m_log;
3512
3513         /*
3514          * If this happens during log recovery, don't worry about
3515          * locking; the log isn't open for business yet.
3516          */
3517         if (!log ||
3518             log->l_flags & XLOG_ACTIVE_RECOVERY) {
3519                 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3520                 XFS_BUF_DONE(mp->m_sb_bp);
3521                 return (0);
3522         }
3523
3524         /*
3525          * Somebody could've already done the hard work for us.
3526          * No need to get locks for this.
3527          */
3528         if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
3529                 ASSERT(XLOG_FORCED_SHUTDOWN(log));
3530                 return (1);
3531         }
3532         retval = 0;
3533         /*
3534          * We must hold both the GRANT lock and the LOG lock,
3535          * before we mark the filesystem SHUTDOWN and wake
3536          * everybody up to tell the bad news.
3537          */
3538         s = GRANT_LOCK(log);
3539         s2 = LOG_LOCK(log);
3540         mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
3541         XFS_BUF_DONE(mp->m_sb_bp);
3542         /*
3543          * This flag is sort of redundant because of the mount flag, but
3544          * it's good to maintain the separation between the log and the rest
3545          * of XFS.
3546          */
3547         log->l_flags |= XLOG_IO_ERROR;
3548
3549         /*
3550          * If we hit a log error, we want to mark all the iclogs IOERROR
3551          * while we're still holding the loglock.
3552          */
3553         if (logerror)
3554                 retval = xlog_state_ioerror(log);
3555         LOG_UNLOCK(log, s2);
3556
3557         /*
3558          * We don't want anybody waiting for log reservations
3559          * after this. That means we have to wake up everybody
3560          * queued up on reserve_headq as well as write_headq.
3561          * In addition, we make sure in xlog_{re}grant_log_space
3562          * that we don't enqueue anything once the SHUTDOWN flag
3563          * is set, and this action is protected by the GRANTLOCK.
3564          */
3565         if ((tic = log->l_reserve_headq)) {
3566                 do {
3567                         sv_signal(&tic->t_sema);
3568                         tic = tic->t_next;
3569                 } while (tic != log->l_reserve_headq);
3570         }
3571
3572         if ((tic = log->l_write_headq)) {
3573                 do {
3574                         sv_signal(&tic->t_sema);
3575                         tic = tic->t_next;
3576                 } while (tic != log->l_write_headq);
3577         }
3578         GRANT_UNLOCK(log, s);
3579
3580         if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
3581                 ASSERT(!logerror);
3582                 /*
3583                  * Force the incore logs to disk before shutting the
3584                  * log down completely.
3585                  */
3586                 xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy);
3587                 s2 = LOG_LOCK(log);
3588                 retval = xlog_state_ioerror(log);
3589                 LOG_UNLOCK(log, s2);
3590         }
3591         /*
3592          * Wake up everybody waiting on xfs_log_force.
3593          * Callback all log item committed functions as if the
3594          * log writes were completed.
3595          */
3596         xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);
3597
3598 #ifdef XFSERRORDEBUG
3599         {
3600                 xlog_in_core_t  *iclog;
3601
3602                 s = LOG_LOCK(log);
3603                 iclog = log->l_iclog;
3604                 do {
3605                         ASSERT(iclog->ic_callback == 0);
3606                         iclog = iclog->ic_next;
3607                 } while (iclog != log->l_iclog);
3608                 LOG_UNLOCK(log, s);
3609         }
3610 #endif
3611         /* return non-zero if log IOERROR transition had already happened */
3612         return (retval);
3613 }
3614
3615 STATIC int
3616 xlog_iclogs_empty(xlog_t *log)
3617 {
3618         xlog_in_core_t  *iclog;
3619
3620         iclog = log->l_iclog;
3621         do {
3622                 /* endianness does not matter here, zero is zero in
3623                  * any language.
3624                  */
3625                 if (iclog->ic_header.h_num_logops)
3626                         return(0);
3627                 iclog = iclog->ic_next;
3628         } while (iclog != log->l_iclog);
3629         return(1);
3630 }