/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"

STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
STATIC void     xlog_recover_insert_item_backq(xlog_recover_item_t **q,
                                               xlog_recover_item_t *item);
#if defined(DEBUG)
STATIC void     xlog_recover_check_summary(xlog_t *);
#else
#define xlog_recover_check_summary(log)
#endif


/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs)   \
        ( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
        ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno)   ((bno) & ~(log)->l_sectbb_mask)

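/*
 * Worked example (illustrative, not from the original source, assuming
 * a log device with 4096-byte sectors on 512-byte basic blocks, so
 * l_sectbb_mask == 0x7):
 *
 *      XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1)  == 8   (rounds up to one sector)
 *      XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 8)  == 8   (already aligned)
 *      XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 9)  == 16
 *      XLOG_SECTOR_ROUNDDOWN_BLKNO(log, 13) == 8   (13 & ~0x7)
 *
 * On a 512-byte sector log l_sectbb_mask is zero, so the round-up macro
 * returns bbs unchanged and the round-down macro is a no-op.
 */
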
xfs_buf_t *
xlog_get_bp(
        xlog_t          *log,
        int             num_bblks)
{
        ASSERT(num_bblks > 0);

        if (log->l_sectbb_log) {
                if (num_bblks > 1)
                        num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
                num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
        }
        return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
}

void
xlog_put_bp(
        xfs_buf_t       *bp)
{
        xfs_buf_free(bp);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
int
xlog_bread(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(bp);
        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_READ(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        xfsbdstrat(log->l_mp, bp);
        error = xfs_iowait(bp);
        if (error)
                xfs_ioerror_alert("xlog_bread", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_HOLD(bp);
        XFS_BUF_PSEMA(bp, PRIBIO);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        if ((error = xfs_bwrite(log->l_mp, bp)))
                xfs_ioerror_alert("xlog_bwrite", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

STATIC xfs_caddr_t
xlog_align(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        xfs_caddr_t     ptr;

        if (!log->l_sectbb_log)
                return XFS_BUF_PTR(bp);

        ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
        ASSERT(XFS_BUF_SIZE(bp) >=
                BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
        return ptr;
}

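/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * pattern the recovery code below uses with the four routines above.
 * Because xlog_bread() may widen the request out to sector boundaries,
 * the caller must locate its block within the buffer via xlog_align()
 * rather than using XFS_BUF_PTR() directly.
 */
#if 0
STATIC int
xlog_read_cycle_example(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        uint            *cycle)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        int             error;

        bp = xlog_get_bp(log, 1);               /* one basic block */
        if (!bp)
                return ENOMEM;
        error = xlog_bread(log, blk_no, 1, bp);
        if (!error) {
                offset = xlog_align(log, blk_no, 1, bp);
                *cycle = xlog_get_cycle(offset);
        }
        xlog_put_bp(bp);
        return error;
}
#endif
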
#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        int                     b;

        cmn_err(CE_DEBUG, "%s:  SB : uuid = ", __func__);
        for (b = 0; b < 16; b++)
                cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&mp->m_sb.sb_uuid)[b]);
        cmn_err(CE_DEBUG, ", fmt = %d\n", XLOG_FMT);
        cmn_err(CE_DEBUG, "    log : uuid = ");
        for (b = 0; b < 16; b++)
                cmn_err(CE_DEBUG, "%02x", ((uchar_t *)&head->h_fs_uuid)[b]);
        cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
                xlog_warn(
        "XFS: dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn(
        "XFS: dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

/*
 * check a log record header (already read by the caller) against this
 * mount's superblock uuid
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

        if (uuid_is_nil(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is nil, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xlog_warn("XFS: nil uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn("XFS: log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        if (XFS_BUF_GETERROR(bp)) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery. One strike!
                 */
                xfs_ioerror_alert("xlog_recover_iodone",
                                  bp->b_mount, bp, XFS_BUF_ADDR(bp));
                xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
        }
        bp->b_mount = NULL;
        XFS_BUF_CLR_IODONE_FUNC(bp);
        xfs_biodone(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
        xlog_t          *log,
        xfs_buf_t       *bp,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        xfs_caddr_t     offset;
        xfs_daddr_t     mid_blk;
        uint            mid_cycle;
        int             error;

        mid_blk = BLK_AVG(first_blk, *last_blk);
        while (mid_blk != first_blk && mid_blk != *last_blk) {
                if ((error = xlog_bread(log, mid_blk, 1, bp)))
                        return error;
                offset = xlog_align(log, mid_blk, 1, bp);
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle) {
                        *last_blk = mid_blk;
                        /* last_half_cycle == mid_cycle */
                } else {
                        first_blk = mid_blk;
                        /* first_half_cycle == mid_cycle */
                }
                mid_blk = BLK_AVG(first_blk, *last_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
               (mid_blk == *last_blk && mid_blk-1 == first_blk));

        return 0;
}

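/*
 * Worked trace of the search above (illustrative cycle numbers,
 * assuming BLK_AVG() is the integer mean): find the first block of
 * cycle 8 in
 *
 *      blk:    0  1  2  3  4  5  6  7
 *      cycle:  9  9  9  8  8  8  8  8
 *
 * starting with first_blk = 0 and *last_blk = 7:
 *
 *      mid_blk = 3, cycle 8 == 8  ->  *last_blk = 3
 *      mid_blk = 1, cycle 9 != 8  ->  first_blk = 1
 *      mid_blk = 2, cycle 9 != 8  ->  first_blk = 2
 *
 * mid_blk is then recomputed as 2 == first_blk, the loop exits, and
 * *last_blk = 3 names the first block stamped with the new cycle.
 */
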
/*
 * Check that the range of blocks does not contain the cycle number
 * given.  The scan needs to occur from front to back and the ptr into the
 * region must be updated since a later routine will need to perform another
 * test.  If the region is completely good, we end up returning the same
 * last block number.
 *
 * Set blkno to -1 if we encounter no errors.  This is an invalid block number
 * since we don't ever expect logs to get this large.
 */
STATIC int
xlog_find_verify_cycle(
        xlog_t          *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        xfs_buf_t       *bp;
        xfs_daddr_t     bufblks;
        xfs_caddr_t     buf = NULL;
        int             error = 0;

        bufblks = 1 << ffs(nbblks);

        while (!(bp = xlog_get_bp(log, bufblks))) {
                /* can't get enough memory to do everything in one big buffer */
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                if ((error = xlog_bread(log, i, bcount, bp)))
                        goto out;

                buf = xlog_align(log, i, bcount, bp);
                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        xlog_t                  *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        xfs_buf_t               *bp;
        xfs_caddr_t             offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        if (!(bp = xlog_get_bp(log, num_blks))) {
                if (!(bp = xlog_get_bp(log, 1)))
                        return ENOMEM;
                smallmem = 1;
        } else {
                if ((error = xlog_bread(log, start_blk, num_blks, bp)))
                        goto out;
                offset = xlog_align(log, start_blk, num_blks, bp);
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xlog_warn(
                "XFS: Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = XFS_ERROR(EIO);
                        goto out;
                }

                if (smallmem) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto out;
                        offset = xlog_align(log, i, 1, bp);
                }

                head = (xlog_rec_header_t *)offset;

                if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = -1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                uint    h_size = be32_to_cpu(head->h_size);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks !=
            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
                *last_blk = i;

out:
        xlog_put_bp(bp);
        return error;
}

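/*
 * Worked example of the final check above (illustrative numbers):
 * suppose the header found at block i = 100 describes a record whose
 * data covers 7 basic blocks (BTOBB(h_len) == 7) with a single header
 * block (xhdrs == 1), so the record occupies blocks 100..107 and the
 * next record starts at 108.  With extra_bblks == 0:
 *
 *      *last_blk == 108:  108 - 100 + 0 == 7 + 1, the record is
 *      complete and *last_blk is left pointing just past it;
 *
 *      *last_blk == 105:  105 - 100 + 0 != 8, *last_blk points into
 *      the middle of the record and is pulled back to the header
 *      block at 100.
 */
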
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        xlog_t          *log,
        xfs_daddr_t     *return_head_blk)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xlog_warn("XFS: totally zeroed log");
                }

                return 0;
        } else if (error) {
                xlog_warn("XFS: empty log check failed");
                return error;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        if ((error = xlog_bread(log, last_blk, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, last_blk, 1, bp);
        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ...
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * or
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 */
                stop_on_cycle = last_half_cycle;
                if ((error = xlog_find_cycle_start(log, bp, first_blk,
                                                &head_blk, last_half_cycle)))
                        goto bp_err;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                start_blk = log_bbnum - num_scan_bblks + head_blk;
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks - head_blk >= 0);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto bp_err;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto bad_blk;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

 bad_blk:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        error = XFS_ERROR(EIO);
                        goto bp_err;
                } else if (error)
                        goto bp_err;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - num_scan_bblks + head_blk;
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        if ((error = xlog_find_verify_log_record(log,
                                                        start_blk, &new_blk,
                                                        (int)head_blk)) == -1) {
                                error = XFS_ERROR(EIO);
                                goto bp_err;
                        } else if (error)
                                goto bp_err;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto bp_err;
        }

        xlog_put_bp(bp);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

 bp_err:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to find log head");
        return error;
}

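/*
 * Worked example for the wrapped scan in xlog_find_head() above
 * (illustrative numbers): with log_bbnum = 1000, num_scan_bblks = 64
 * and head_blk = 10, the verification window is the 64 blocks that
 * precede the head around the wrap.  It is scanned in two pieces:
 * blocks 946..999 at the physical end of the log (start_blk =
 * 1000 - 64 + 10 = 946, checked for cycle stop_on_cycle - 1) and
 * blocks 0..9 at the physical start (checked for stop_on_cycle
 * itself).
 */
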
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
int
xlog_find_tail(
        xlog_t                  *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk)
{
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
        xfs_caddr_t             offset = NULL;
        xfs_buf_t               *bp;
        int                     error, i, found;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        xfs_lsn_t               tail_lsn;
        int                     hblks;

        found = 0;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;

        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if (*head_blk == 0) {                           /* special case */
                if ((error = xlog_bread(log, 0, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, 0, 1, bp);
                if (xlog_get_cycle(offset) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto exit;
                }
        }

        /*
         * Search backwards looking for log record header block
         */
        ASSERT(*head_blk < INT_MAX);
        for (i = (int)(*head_blk) - 1; i >= 0; i--) {
                if ((error = xlog_bread(log, i, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, i, 1, bp);
                if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
                        found = 1;
                        break;
                }
        }
        /*
         * If we haven't found the log record header block, start looking
         * again from the end of the physical log.  XXXmiken: There should be
         * a check here to make sure we didn't search more than N blocks in
         * the previous code.
         */
        if (!found) {
                for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto bread_err;
                        offset = xlog_align(log, i, 1, bp);
                        if (XLOG_HEADER_MAGIC_NUM ==
                            be32_to_cpu(*(__be32 *)offset)) {
                                found = 2;
                                break;
                        }
                }
        }
        if (!found) {
                xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
                ASSERT(0);
                xlog_put_bp(bp);        /* don't leak the buffer on this error path */
                return XFS_ERROR(EIO);
        }

        /* find blk_no of tail of log */
        rhead = (xlog_rec_header_t *)offset;
        *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = i;
        log->l_curr_block = (int)*head_blk;
        log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
        if (found == 2)
                log->l_curr_cycle++;
        log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
        log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
        log->l_grant_reserve_cycle = log->l_curr_cycle;
        log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
        log->l_grant_write_cycle = log->l_curr_cycle;
        log->l_grant_write_bytes = BBTOB(log->l_curr_block);

        /*
         * Look for unmount record.  If we find it, then we know there
         * was a clean unmount.  Since 'i' could be the last block in
         * the physical log, we convert to a log block before comparing
         * to the head_blk.
         *
         * Save the current tail lsn to use to pass to
         * xlog_clear_stale_blocks() below.  We won't want to clear the
         * unmount record if there is one, so we pass the lsn of the
         * unmount record rather than the block after it.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                int     h_size = be32_to_cpu(rhead->h_size);
                int     h_version = be32_to_cpu(rhead->h_version);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }
        after_umount_blk = (i + hblks + (int)
                BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
        tail_lsn = log->l_tail_lsn;
        if (*head_blk == after_umount_blk &&
            be32_to_cpu(rhead->h_num_logops) == 1) {
                umount_data_blk = (i + hblks) % log->l_logBBsize;
                if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
                        goto bread_err;
                }
                offset = xlog_align(log, umount_data_blk, 1, bp);
                op_head = (xlog_op_header_t *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written
                         * log records will point recovery to after the
                         * current unmount record.
                         */
                        log->l_tail_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        log->l_last_sync_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        *tail_blk = after_umount_blk;

                        /*
                         * Note that the unmount was clean. If the unmount
                         * was not clean, we need to know this to rebuild the
                         * superblock counters from the perag headers if we
                         * have a filesystem using non-persistent counters.
                         */
                        log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
                }
        }

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
                error = xlog_clear_stale_blocks(log, tail_lsn);
        }

bread_err:
exit:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to locate log tail");
        return error;
}

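/*
 * Worked example for the unmount record check above (illustrative
 * numbers, assuming XLOG_HEADER_CYCLE_SIZE is 32k): a v2 log with
 * h_size = 64k needs hblks = 65536 / 32768 = 2 header blocks.  If the
 * record header sits at block i = 500 and the record body is a single
 * basic block (BTOBB(h_len) == 1), then
 *
 *      umount_data_blk  = (500 + 2) % l_logBBsize     = 502
 *      after_umount_blk = (500 + 2 + 1) % l_logBBsize = 503
 *
 * and the unmount is considered clean only when the head sits exactly
 * at after_umount_blk and the record holds a single log operation.
 */
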
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *      0  => the log is completely written to
 *      -1 => use *blk_no as the first block of the log
 *      >0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
        xlog_t          *log,
        xfs_daddr_t     *blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        *blk_no = 0;

        /* check totally zeroed log */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_cycle = xlog_get_cycle(offset);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                xlog_put_bp(bp);
                return -1;
        }

        /* check partially zeroed log */
        if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, log_bbnum-1, 1, bp);
        last_cycle = xlog_get_cycle(offset);
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
        } else if (first_cycle != 1) {
                /*
                 * If the cycle of the last block is zero, the cycle of
                 * the first block must be 1. If it's not, maybe we're
                 * not looking at a log... Bail out.
                 */
                xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
                xlog_put_bp(bp);        /* don't leak the buffer on this error path */
                return XFS_ERROR(EINVAL);
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
                goto bp_err;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the maximum
         * is not chosen to mean anything special.   XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                         (int)num_scan_bblks, 0, &new_blk)))
                goto bp_err;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially backup over partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        if ((error = xlog_find_verify_log_record(log, start_blk,
                                &last_blk, 0)) == -1) {
                error = XFS_ERROR(EIO);
                goto bp_err;
        } else if (error)
                goto bp_err;

        *blk_no = last_blk;
bp_err:
        xlog_put_bp(bp);
        if (error)
                return error;
        return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
        xlog_t                  *log,
        xfs_caddr_t             buf,
        int                     cycle,
        int                     block,
        int                     tail_cycle,
        int                     tail_block)
{
        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;

        memset(buf, 0, BBSIZE);
        recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
        recp->h_cycle = cpu_to_be32(cycle);
        recp->h_version = cpu_to_be32(
                        xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
        recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
        recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
        recp->h_fmt = cpu_to_be32(XLOG_FMT);
        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
        xlog_t          *log,
        int             cycle,
        int             start_block,
        int             blocks,
        int             tail_cycle,
        int             tail_block)
{
        xfs_caddr_t     offset;
        xfs_buf_t       *bp;
        int             balign, ealign;
        int             sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
        int             end_block = start_block + blocks;
        int             bufblks;
        int             error = 0;
        int             i, j = 0;

        bufblks = 1 << ffs(blocks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        /* We may need to do a read at the start to fill in part of
         * the buffer in the starting sector not covered by the first
         * write below.
         */
        balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
        if (balign != start_block) {
                if ((error = xlog_bread(log, start_block, 1, bp))) {
                        xlog_put_bp(bp);
                        return error;
                }
                j = start_block - balign;
        }

        for (i = start_block; i < end_block; i += bufblks) {
                int             bcount, endcount;

                bcount = min(bufblks, end_block - start_block);
                endcount = bcount - j;

                /* We may need to do a read at the end to fill in part of
                 * the buffer in the final sector not covered by the write.
                 * If this is the same sector as the above read, skip it.
                 */
                ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
                if (j == 0 && (start_block + endcount > ealign)) {
                        offset = XFS_BUF_PTR(bp);
                        balign = BBTOB(ealign - start_block);
                        error = XFS_BUF_SET_PTR(bp, offset + balign,
                                                BBTOB(sectbb));
                        if (!error)
                                error = xlog_bread(log, ealign, sectbb, bp);
                        if (!error)
                                error = XFS_BUF_SET_PTR(bp, offset, bufblks);
                        if (error)
                                break;
                }

                offset = xlog_align(log, start_block, endcount, bp);
                for (; j < endcount; j++) {
                        xlog_add_record(log, offset, cycle, i+j,
                                        tail_cycle, tail_block);
                        offset += BBSIZE;
                }
                error = xlog_bwrite(log, start_block, endcount, bp);
                if (error)
                        break;
                start_block += endcount;
                j = 0;
        }
        xlog_put_bp(bp);
        return error;
}

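/*
 * Worked example of the alignment reads in xlog_write_log_records()
 * above (illustrative numbers, assuming a 4k-sector log, sectbb == 8):
 * stamping blocks 8..17 gives start_block = 8 and end_block = 18.
 * start_block is already sector aligned, so no leading read is needed
 * and j stays 0.  ealign = 18 & ~7 = 16 is less than end_block, so
 * before the final write the sector covering blocks 16..23 is read
 * back in; the write then re-emits blocks 18..23 unchanged while
 * stamping 16 and 17.  Had start_block been unaligned, the leading
 * sector would have been read the same way, with j skipping the
 * blocks that must be preserved at the front of the buffer.
 */
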
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
        xlog_t          *log,
        xfs_lsn_t       tail_lsn)
{
        int             tail_cycle, head_cycle;
        int             tail_block, head_block;
        int             tail_distance, max_distance;
        int             distance;
        int             error;

        tail_cycle = CYCLE_LSN(tail_lsn);
        tail_block = BLOCK_LSN(tail_lsn);
        head_cycle = log->l_curr_cycle;
        head_block = log->l_curr_block;

        /*
         * Figure out the distance between the new head of the log
         * and the tail.  We want to write over any blocks beyond the
         * head that we may have written just before the crash, but
         * we don't want to overwrite the tail of the log.
         */
        if (head_cycle == tail_cycle) {
                /*
                 * The tail is behind the head in the physical log,
                 * so the distance from the head to the tail is the
                 * distance from the head to the end of the log plus
                 * the distance from the beginning of the log to the
                 * tail.
                 */
                if (unlikely(head_block < tail_block ||
                             head_block >= log->l_logBBsize)) {
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block + (log->l_logBBsize - head_block);
        } else {
                /*
                 * The head is behind the tail in the physical log,
                 * so the distance from the head to the tail is just
                 * the tail block minus the head block.
                 */
                if (unlikely(head_block >= tail_block ||
                             head_cycle != (tail_cycle + 1))) {
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block - head_block;
        }

        /*
         * If the head is right up against the tail, we can't clear
         * anything.
         */
        if (tail_distance <= 0) {
                ASSERT(tail_distance == 0);
                return 0;
        }

        max_distance = XLOG_TOTAL_REC_SHIFT(log);
        /*
         * Take the smaller of the maximum amount of outstanding I/O
         * we could have and the distance to the tail to clear out.
         * We take the smaller so that we don't overwrite the tail and
         * we don't waste all day writing from the head to the tail
         * for no reason.
         */
        max_distance = MIN(max_distance, tail_distance);

        if ((head_block + max_distance) <= log->l_logBBsize) {
                /*
                 * We can stomp all the blocks we need to without
                 * wrapping around the end of the log.  Just do it
                 * in a single write.  Use the cycle number of the
                 * current cycle minus one so that the log will look like:
                 *     n ... | n - 1 ...
                 */
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, max_distance, tail_cycle,
                                tail_block);
                if (error)
                        return error;
        } else {
                /*
                 * We need to wrap around the end of the physical log in
                 * order to clear all the blocks.  Do it in two separate
                 * I/Os.  The first write should be from the head to the
                 * end of the physical log, and it should use the current
                 * cycle number minus one just like above.
                 */
                distance = log->l_logBBsize - head_block;
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, distance, tail_cycle,
                                tail_block);

                if (error)
                        return error;

                /*
                 * Now write the blocks at the start of the physical log.
                 * This writes the remainder of the blocks we want to clear.
                 * It uses the current cycle number since we're now on the
                 * same cycle as the head so that we get:
                 *    n ... n ... | n - 1 ...
                 *    ^^^^^ blocks we're writing
                 */
                distance = max_distance - (log->l_logBBsize - head_block);
                error = xlog_write_log_records(log, head_cycle, 0, distance,
                                tail_cycle, tail_block);
                if (error)
                        return error;
        }

        return 0;
}

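/*
 * Worked example for the distance computation above (illustrative
 * numbers, l_logBBsize = 1000):
 *
 *      Same cycle:  head_cycle == tail_cycle, head_block = 900,
 *      tail_block = 100.  The head can only advance through the
 *      physical end of the log and back around to the tail, so
 *      tail_distance = 100 + (1000 - 900) = 200.
 *
 *      Wrapped:  head_cycle == tail_cycle + 1, head_block = 100,
 *      tail_block = 900.  The head runs straight at the tail, so
 *      tail_distance = 900 - 100 = 800.
 *
 * The number of blocks actually stamped is then the smaller of
 * tail_distance and XLOG_TOTAL_REC_SHIFT(log).
 */
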
1315 /******************************************************************************
1316  *
1317  *              Log recover routines
1318  *
1319  ******************************************************************************
1320  */
1321
1322 STATIC xlog_recover_t *
1323 xlog_recover_find_tid(
1324         xlog_recover_t          *q,
1325         xlog_tid_t              tid)
1326 {
1327         xlog_recover_t          *p = q;
1328
1329         while (p != NULL) {
1330                 if (p->r_log_tid == tid)
1331                     break;
1332                 p = p->r_next;
1333         }
1334         return p;
1335 }
1336
1337 STATIC void
1338 xlog_recover_put_hashq(
1339         xlog_recover_t          **q,
1340         xlog_recover_t          *trans)
1341 {
1342         trans->r_next = *q;
1343         *q = trans;
1344 }
1345
1346 STATIC void
1347 xlog_recover_add_item(
1348         xlog_recover_item_t     **itemq)
1349 {
1350         xlog_recover_item_t     *item;
1351
1352         item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1353         xlog_recover_insert_item_backq(itemq, item);
1354 }
1355
1356 STATIC int
1357 xlog_recover_add_to_cont_trans(
1358         xlog_recover_t          *trans,
1359         xfs_caddr_t             dp,
1360         int                     len)
1361 {
1362         xlog_recover_item_t     *item;
1363         xfs_caddr_t             ptr, old_ptr;
1364         int                     old_len;
1365
1366         item = trans->r_itemq;
1367         if (item == NULL) {
1368                 /* finish copying rest of trans header */
1369                 xlog_recover_add_item(&trans->r_itemq);
1370                 ptr = (xfs_caddr_t) &trans->r_theader +
1371                                 sizeof(xfs_trans_header_t) - len;
1372                 memcpy(ptr, dp, len); /* dest, src, len */
1373                 return 0;
1374         }
1375         item = item->ri_prev;
1376
1377         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1378         old_len = item->ri_buf[item->ri_cnt-1].i_len;
1379
1380         ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1381         memcpy(&ptr[old_len], dp, len); /* dest, src, len */
1382         item->ri_buf[item->ri_cnt-1].i_len += len;
1383         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1384         return 0;
1385 }
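/*
 * Editorial sketch (hypothetical names, userspace allocator): the
 * continuation append performed above.  A continued region is grown
 * and the new bytes copied onto its tail, which is all the function
 * does to the last region of the last item; the kernel code uses
 * kmem_realloc() with KM_SLEEP, which cannot fail.
 */
#include <stdlib.h>
#include <string.h>

static char *
example_append_region(char *old_ptr, size_t old_len,
		      const char *dp, size_t len)
{
	char *ptr = realloc(old_ptr, old_len + len);

	if (ptr != NULL)
		memcpy(ptr + old_len, dp, len);	/* append the new bytes */
	return ptr;
}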
1386
1387 /*
1388  * The next region to add is the start of a new region.  It could be
1389  * a whole region or just the first part of one.  Because
1390  * of this, the assumption here is that the type and size fields of all
1391  * format structures fit into the first 32 bits of the structure.
1392  *
1393  * This works because all regions must be 32 bit aligned.  Therefore, we
1394  * either have both fields or we have neither field.  In the case we have
1395  * neither field, the data part of the region is zero length.  We only have
1396  * a log_op_header and can throw away the header since a new one will appear
1397  * later.  If we have at least 4 bytes, then we can determine how many regions
1398  * will appear in the current log item.
1399  */
1400 STATIC int
1401 xlog_recover_add_to_trans(
1402         xlog_recover_t          *trans,
1403         xfs_caddr_t             dp,
1404         int                     len)
1405 {
1406         xfs_inode_log_format_t  *in_f;                  /* any will do */
1407         xlog_recover_item_t     *item;
1408         xfs_caddr_t             ptr;
1409
1410         if (!len)
1411                 return 0;
1412         item = trans->r_itemq;
1413         if (item == NULL) {
1414                 /* we need to catch log corruptions here */
1415                 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1416                         xlog_warn("XFS: xlog_recover_add_to_trans: "
1417                                   "bad header magic number");
1418                         ASSERT(0);
1419                         return XFS_ERROR(EIO);
1420                 }
1421                 if (len == sizeof(xfs_trans_header_t))
1422                         xlog_recover_add_item(&trans->r_itemq);
1423                 memcpy(&trans->r_theader, dp, len); /* dest, src, len */
1424                 return 0;
1425         }
1426
1427         ptr = kmem_alloc(len, KM_SLEEP);
1428         memcpy(ptr, dp, len);
1429         in_f = (xfs_inode_log_format_t *)ptr;
1430
1431         if (item->ri_prev->ri_total != 0 &&
1432              item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
1433                 xlog_recover_add_item(&trans->r_itemq);
1434         }
1435         item = trans->r_itemq;
1436         item = item->ri_prev;
1437
1438         if (item->ri_total == 0) {              /* first region to be added */
1439                 item->ri_total  = in_f->ilf_size;
1440                 ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
1441                 item->ri_buf = kmem_zalloc((item->ri_total *
1442                                             sizeof(xfs_log_iovec_t)), KM_SLEEP);
1443         }
1444         ASSERT(item->ri_total > item->ri_cnt);
1445         /* Description region is ri_buf[0] */
1446         item->ri_buf[item->ri_cnt].i_addr = ptr;
1447         item->ri_buf[item->ri_cnt].i_len  = len;
1448         item->ri_cnt++;
1449         return 0;
1450 }
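/*
 * Editorial sketch (hypothetical types): why peeking at the first 32
 * bits of any region is enough, per the comment above.  Every log
 * item format structure begins with a 16-bit type and a 16-bit size,
 * so a common header view recovers the region count without knowing
 * which item type the region carries.
 */
#include <stdint.h>

struct example_fmt_header {	/* first 32 bits of every format struct */
	uint16_t	type;
	uint16_t	size;	/* number of regions in the item */
};

static uint16_t
example_region_count(const void *region)
{
	const struct example_fmt_header *hdr = region;

	return hdr->size;
}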
1451
1452 STATIC void
1453 xlog_recover_new_tid(
1454         xlog_recover_t          **q,
1455         xlog_tid_t              tid,
1456         xfs_lsn_t               lsn)
1457 {
1458         xlog_recover_t          *trans;
1459
1460         trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1461         trans->r_log_tid   = tid;
1462         trans->r_lsn       = lsn;
1463         xlog_recover_put_hashq(q, trans);
1464 }
1465
1466 STATIC int
1467 xlog_recover_unlink_tid(
1468         xlog_recover_t          **q,
1469         xlog_recover_t          *trans)
1470 {
1471         xlog_recover_t          *tp;
1472         int                     found = 0;
1473
1474         ASSERT(trans != NULL);
1475         if (trans == *q) {
1476                 *q = (*q)->r_next;
1477         } else {
1478                 tp = *q;
1479                 while (tp) {
1480                         if (tp->r_next == trans) {
1481                                 found = 1;
1482                                 break;
1483                         }
1484                         tp = tp->r_next;
1485                 }
1486                 if (!found) {
1487                         xlog_warn(
1488                              "XFS: xlog_recover_unlink_tid: trans not found");
1489                         ASSERT(0);
1490                         return XFS_ERROR(EIO);
1491                 }
1492                 tp->r_next = tp->r_next->r_next;
1493         }
1494         return 0;
1495 }
1496
1497 STATIC void
1498 xlog_recover_insert_item_backq(
1499         xlog_recover_item_t     **q,
1500         xlog_recover_item_t     *item)
1501 {
1502         if (*q == NULL) {
1503                 item->ri_prev = item->ri_next = item;
1504                 *q = item;
1505         } else {
1506                 item->ri_next           = *q;
1507                 item->ri_prev           = (*q)->ri_prev;
1508                 (*q)->ri_prev           = item;
1509                 item->ri_prev->ri_next  = item;
1510         }
1511 }
1512
1513 STATIC void
1514 xlog_recover_insert_item_frontq(
1515         xlog_recover_item_t     **q,
1516         xlog_recover_item_t     *item)
1517 {
1518         xlog_recover_insert_item_backq(q, item);
1519         *q = item;
1520 }
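/*
 * Editorial sketch (hypothetical, userspace): how the two helpers
 * above behave.  A backq insert splices the node in just before the
 * head, so arrivals queue up in order; a frontq insert is the same
 * splice followed by moving the head, which is how non-cancelled
 * buffers get reordered ahead of everything else below.
 */
#include <assert.h>
#include <stddef.h>

struct example_node {
	struct example_node	*next;
	struct example_node	*prev;
	int			tag;
};

static void
example_insert_backq(struct example_node **q, struct example_node *n)
{
	if (*q == NULL) {
		n->next = n->prev = n;
		*q = n;
	} else {
		n->next = *q;
		n->prev = (*q)->prev;
		(*q)->prev = n;
		n->prev->next = n;
	}
}

int
main(void)
{
	struct example_node a = { .tag = 1 }, b = { .tag = 2 },
			    c = { .tag = 3 };
	struct example_node *q = NULL;

	example_insert_backq(&q, &a);
	example_insert_backq(&q, &b);	/* queue is now: a, b */
	example_insert_backq(&q, &c);
	q = &c;				/* front insert: c, a, b */
	assert(q->tag == 3 && q->next->tag == 1 && q->next->next->tag == 2);
	return 0;
}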
1521
1522 STATIC int
1523 xlog_recover_reorder_trans(
1524         xlog_recover_t          *trans)
1525 {
1526         xlog_recover_item_t     *first_item, *itemq, *itemq_next;
1527         xfs_buf_log_format_t    *buf_f;
1528         ushort                  flags = 0;
1529
1530         first_item = itemq = trans->r_itemq;
1531         trans->r_itemq = NULL;
1532         do {
1533                 itemq_next = itemq->ri_next;
1534                 buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
1535
1536                 switch (ITEM_TYPE(itemq)) {
1537                 case XFS_LI_BUF:
1538                         flags = buf_f->blf_flags;
1539                         if (!(flags & XFS_BLI_CANCEL)) {
1540                                 xlog_recover_insert_item_frontq(&trans->r_itemq,
1541                                                                 itemq);
1542                                 break;
1543                         }
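                        /* XFS_BLI_CANCEL is set: fall through, queue at back */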
1544                 case XFS_LI_INODE:
1545                 case XFS_LI_DQUOT:
1546                 case XFS_LI_QUOTAOFF:
1547                 case XFS_LI_EFD:
1548                 case XFS_LI_EFI:
1549                         xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
1550                         break;
1551                 default:
1552                         xlog_warn(
1553         "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
1554                         ASSERT(0);
1555                         return XFS_ERROR(EIO);
1556                 }
1557                 itemq = itemq_next;
1558         } while (first_item != itemq);
1559         return 0;
1560 }
1561
1562 /*
1563  * Build up the table of buf cancel records so that we don't replay
1564  * cancelled data in the second pass.  For buffer records that are
1565  * not cancel records, there is nothing to do here so we just return.
1566  *
1567  * If we get a cancel record which is already in the table, this indicates
1568  * that the buffer was cancelled multiple times.  In order to ensure
1569  * that during pass 2 we keep the record in the table until we reach its
1570  * last occurrence in the log, we keep a reference count in the cancel
1571  * record in the table to tell us how many times we expect to see this
1572  * record during the second pass.
1573  */
1574 STATIC void
1575 xlog_recover_do_buffer_pass1(
1576         xlog_t                  *log,
1577         xfs_buf_log_format_t    *buf_f)
1578 {
1579         xfs_buf_cancel_t        *bcp;
1580         xfs_buf_cancel_t        *nextp;
1581         xfs_buf_cancel_t        *prevp;
1582         xfs_buf_cancel_t        **bucket;
1583         xfs_daddr_t             blkno = 0;
1584         uint                    len = 0;
1585         ushort                  flags = 0;
1586
1587         switch (buf_f->blf_type) {
1588         case XFS_LI_BUF:
1589                 blkno = buf_f->blf_blkno;
1590                 len = buf_f->blf_len;
1591                 flags = buf_f->blf_flags;
1592                 break;
1593         }
1594
1595         /*
1596          * If this isn't a cancel buffer item, then just return.
1597          */
1598         if (!(flags & XFS_BLI_CANCEL))
1599                 return;
1600
1601         /*
1602          * Insert an xfs_buf_cancel record into the hash table of
1603          * them.  If there is already an identical record, bump
1604          * its reference count.
1605          */
1606         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1607                                           XLOG_BC_TABLE_SIZE];
1608         /*
1609          * If the hash bucket is empty then just insert a new record into
1610          * the bucket.
1611          */
1612         if (*bucket == NULL) {
1613                 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1614                                                      KM_SLEEP);
1615                 bcp->bc_blkno = blkno;
1616                 bcp->bc_len = len;
1617                 bcp->bc_refcount = 1;
1618                 bcp->bc_next = NULL;
1619                 *bucket = bcp;
1620                 return;
1621         }
1622
1623         /*
1624          * The hash bucket is not empty, so search for duplicates of our
1625          * record.  If we find one, just bump its refcount.  If not,
1626          * add a new record at the end of the list.
1627          */
1628         prevp = NULL;
1629         nextp = *bucket;
1630         while (nextp != NULL) {
1631                 if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
1632                         nextp->bc_refcount++;
1633                         return;
1634                 }
1635                 prevp = nextp;
1636                 nextp = nextp->bc_next;
1637         }
1638         ASSERT(prevp != NULL);
1639         bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1640                                              KM_SLEEP);
1641         bcp->bc_blkno = blkno;
1642         bcp->bc_len = len;
1643         bcp->bc_refcount = 1;
1644         bcp->bc_next = NULL;
1645         prevp->bc_next = bcp;
1646 }
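/*
 * Editorial sketch (hypothetical names, single bucket for brevity):
 * the pass-one cancel table in miniature.  A record is keyed by
 * (blkno, len) and a duplicate insert bumps a refcount, so pass two
 * can keep the entry alive until the last occurrence of the buffer in
 * the log, as described in the comment above.
 */
#include <stdint.h>
#include <stdlib.h>

struct example_cancel {
	uint64_t		blkno;
	unsigned int		len;
	int			refcount;
	struct example_cancel	*next;
};

static void
example_cancel_insert(struct example_cancel **bucket, uint64_t blkno,
		      unsigned int len)
{
	struct example_cancel *cp;

	for (cp = *bucket; cp != NULL; cp = cp->next) {
		if (cp->blkno == blkno && cp->len == len) {
			cp->refcount++;		/* cancelled again */
			return;
		}
	}
	cp = malloc(sizeof(*cp));
	if (cp == NULL)
		return;
	cp->blkno = blkno;
	cp->len = len;
	cp->refcount = 1;
	cp->next = *bucket;			/* push onto the bucket */
	*bucket = cp;
}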
1647
1648 /*
1649  * Check to see whether the buffer being recovered has a corresponding
1650  * entry in the buffer cancel record table.  If it does then return 1
1651  * so that it will be cancelled, otherwise return 0.  If the buffer is
1652  * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
1653  * the refcount on the entry in the table and remove it from the table
1654  * if this is the last reference.
1655  *
1656  * We remove the cancel record from the table when we encounter its
1657  * last occurrence in the log so that if the same buffer is re-used
1658  * again after its last cancellation we actually replay the changes
1659  * made at that point.
1660  */
1661 STATIC int
1662 xlog_check_buffer_cancelled(
1663         xlog_t                  *log,
1664         xfs_daddr_t             blkno,
1665         uint                    len,
1666         ushort                  flags)
1667 {
1668         xfs_buf_cancel_t        *bcp;
1669         xfs_buf_cancel_t        *prevp;
1670         xfs_buf_cancel_t        **bucket;
1671
1672         if (log->l_buf_cancel_table == NULL) {
1673                 /*
1674                  * There is nothing in the table built in pass one,
1675                  * so this buffer must not be cancelled.
1676                  */
1677                 ASSERT(!(flags & XFS_BLI_CANCEL));
1678                 return 0;
1679         }
1680
1681         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1682                                           XLOG_BC_TABLE_SIZE];
1683         bcp = *bucket;
1684         if (bcp == NULL) {
1685                 /*
1686                  * There is no corresponding entry in the table built
1687                  * in pass one, so this buffer has not been cancelled.
1688                  */
1689                 ASSERT(!(flags & XFS_BLI_CANCEL));
1690                 return 0;
1691         }
1692
1693         /*
1694          * Search for an entry in the buffer cancel table that
1695          * matches our buffer.
1696          */
1697         prevp = NULL;
1698         while (bcp != NULL) {
1699                 if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
1700                         /*
1701                          * We've got a match, so return 1 so that the
1702                          * recovery of this buffer is cancelled.
1703                          * If this buffer is actually a buffer cancel
1704                          * log item, then decrement the refcount on the
1705                          * one in the table and remove it if this is the
1706                          * last reference.
1707                          */
1708                         if (flags & XFS_BLI_CANCEL) {
1709                                 bcp->bc_refcount--;
1710                                 if (bcp->bc_refcount == 0) {
1711                                         if (prevp == NULL) {
1712                                                 *bucket = bcp->bc_next;
1713                                         } else {
1714                                                 prevp->bc_next = bcp->bc_next;
1715                                         }
1716                                         kmem_free(bcp);
1717                                 }
1718                         }
1719                         return 1;
1720                 }
1721                 prevp = bcp;
1722                 bcp = bcp->bc_next;
1723         }
1724         /*
1725          * We didn't find a corresponding entry in the table, so
1726          * return 0 so that the buffer is NOT cancelled.
1727          */
1728         ASSERT(!(flags & XFS_BLI_CANCEL));
1729         return 0;
1730 }
1731
1732 STATIC int
1733 xlog_recover_do_buffer_pass2(
1734         xlog_t                  *log,
1735         xfs_buf_log_format_t    *buf_f)
1736 {
1737         xfs_daddr_t             blkno = 0;
1738         ushort                  flags = 0;
1739         uint                    len = 0;
1740
1741         switch (buf_f->blf_type) {
1742         case XFS_LI_BUF:
1743                 blkno = buf_f->blf_blkno;
1744                 flags = buf_f->blf_flags;
1745                 len = buf_f->blf_len;
1746                 break;
1747         }
1748
1749         return xlog_check_buffer_cancelled(log, blkno, len, flags);
1750 }
1751
1752 /*
1753  * Perform recovery for a buffer full of inodes.  In these buffers,
1754  * the only data which should be recovered is that which corresponds
1755  * to the di_next_unlinked pointers in the on disk inode structures.
1756  * The rest of the data for the inodes is always logged through the
1757  * inodes themselves rather than the inode buffer and is recovered
1758  * in xlog_recover_do_inode_trans().
1759  *
1760  * The only time when buffers full of inodes are fully recovered is
1761  * when the buffer is full of newly allocated inodes.  In this case
1762  * the buffer will not be marked as an inode buffer and so will be
1763  * sent to xlog_recover_do_reg_buffer() below during recovery.
1764  */
1765 STATIC int
1766 xlog_recover_do_inode_buffer(
1767         xfs_mount_t             *mp,
1768         xlog_recover_item_t     *item,
1769         xfs_buf_t               *bp,
1770         xfs_buf_log_format_t    *buf_f)
1771 {
1772         int                     i;
1773         int                     item_index;
1774         int                     bit;
1775         int                     nbits;
1776         int                     reg_buf_offset;
1777         int                     reg_buf_bytes;
1778         int                     next_unlinked_offset;
1779         int                     inodes_per_buf;
1780         xfs_agino_t             *logged_nextp;
1781         xfs_agino_t             *buffer_nextp;
1782         unsigned int            *data_map = NULL;
1783         unsigned int            map_size = 0;
1784
1785         switch (buf_f->blf_type) {
1786         case XFS_LI_BUF:
1787                 data_map = buf_f->blf_data_map;
1788                 map_size = buf_f->blf_map_size;
1789                 break;
1790         }
1791         /*
1792          * Set the variables corresponding to the current region to
1793          * 0 so that we'll initialize them on the first pass through
1794          * the loop.
1795          */
1796         reg_buf_offset = 0;
1797         reg_buf_bytes = 0;
1798         bit = 0;
1799         nbits = 0;
1800         item_index = 0;
1801         inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1802         for (i = 0; i < inodes_per_buf; i++) {
1803                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1804                         offsetof(xfs_dinode_t, di_next_unlinked);
1805
1806                 while (next_unlinked_offset >=
1807                        (reg_buf_offset + reg_buf_bytes)) {
1808                         /*
1809                          * The next di_next_unlinked field is beyond
1810                          * the current logged region.  Find the next
1811                          * logged region that contains or is beyond
1812                          * the current di_next_unlinked field.
1813                          */
1814                         bit += nbits;
1815                         bit = xfs_next_bit(data_map, map_size, bit);
1816
1817                         /*
1818                          * If there are no more logged regions in the
1819                          * buffer, then we're done.
1820                          */
1821                         if (bit == -1) {
1822                                 return 0;
1823                         }
1824
1825                         nbits = xfs_contig_bits(data_map, map_size,
1826                                                          bit);
1827                         ASSERT(nbits > 0);
1828                         reg_buf_offset = bit << XFS_BLI_SHIFT;
1829                         reg_buf_bytes = nbits << XFS_BLI_SHIFT;
1830                         item_index++;
1831                 }
1832
1833                 /*
1834                  * If the current logged region starts after the current
1835                  * di_next_unlinked field, then move on to the next
1836                  * di_next_unlinked field.
1837                  */
1838                 if (next_unlinked_offset < reg_buf_offset) {
1839                         continue;
1840                 }
1841
1842                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1843                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
1844                 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1845
1846                 /*
1847                  * The current logged region contains a copy of the
1848                  * current di_next_unlinked field.  Extract its value
1849                  * and copy it to the buffer copy.
1850                  */
1851                 logged_nextp = (xfs_agino_t *)
1852                                ((char *)(item->ri_buf[item_index].i_addr) +
1853                                 (next_unlinked_offset - reg_buf_offset));
1854                 if (unlikely(*logged_nextp == 0)) {
1855                         xfs_fs_cmn_err(CE_ALERT, mp,
1856                                 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
1857                                 item, bp);
1858                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1859                                          XFS_ERRLEVEL_LOW, mp);
1860                         return XFS_ERROR(EFSCORRUPTED);
1861                 }
1862
1863                 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1864                                               next_unlinked_offset);
1865                 *buffer_nextp = *logged_nextp;
1866         }
1867
1868         return 0;
1869 }
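/*
 * Editorial sketch (layout hypothetical): the offset arithmetic that
 * drives the loop above.  With fixed-size on-disk inodes packed into
 * a buffer, the logged di_next_unlinked field of inode i sits at a
 * fixed offset within that inode's slot, so recovery can walk the
 * buffer without interpreting the inodes themselves.
 */
#include <stddef.h>

struct example_dinode {
	unsigned short	di_magic;
	/* ... other core fields elided ... */
	unsigned int	di_next_unlinked;
};

static size_t
example_next_unlinked_offset(int i, size_t inode_size)
{
	return (size_t)i * inode_size +
		offsetof(struct example_dinode, di_next_unlinked);
}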
1870
1871 /*
1872  * Perform a 'normal' buffer recovery.  Each logged region of the
1873  * buffer should be copied over the corresponding region in the
1874  * given buffer.  The bitmap in the buf log format structure indicates
1875  * where to place the logged data.
1876  */
1877 /*ARGSUSED*/
1878 STATIC void
1879 xlog_recover_do_reg_buffer(
1880         xlog_recover_item_t     *item,
1881         xfs_buf_t               *bp,
1882         xfs_buf_log_format_t    *buf_f)
1883 {
1884         int                     i;
1885         int                     bit;
1886         int                     nbits;
1887         unsigned int            *data_map = NULL;
1888         unsigned int            map_size = 0;
1889         int                     error;
1890
1891         switch (buf_f->blf_type) {
1892         case XFS_LI_BUF:
1893                 data_map = buf_f->blf_data_map;
1894                 map_size = buf_f->blf_map_size;
1895                 break;
1896         }
1897         bit = 0;
1898         i = 1;  /* 0 is the buf format structure */
1899         while (1) {
1900                 bit = xfs_next_bit(data_map, map_size, bit);
1901                 if (bit == -1)
1902                         break;
1903                 nbits = xfs_contig_bits(data_map, map_size, bit);
1904                 ASSERT(nbits > 0);
1905                 ASSERT(item->ri_buf[i].i_addr != NULL);
1906                 ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
1907                 ASSERT(XFS_BUF_COUNT(bp) >=
1908                        ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
1909
1910                 /*
1911                  * Do a sanity check if this is a dquot buffer. Just checking
1912                  * the first dquot in the buffer should do.  XXX: this is
1913                  * probably a good thing to do for other buf types also.
1914                  */
1915                 error = 0;
1916                 if (buf_f->blf_flags &
1917                    (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
1918                         error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
1919                                                item->ri_buf[i].i_addr,
1920                                                -1, 0, XFS_QMOPT_DOWARN,
1921                                                "dquot_buf_recover");
1922                 }
1923                 if (!error)
1924                         memcpy(xfs_buf_offset(bp,
1925                                 (uint)bit << XFS_BLI_SHIFT),    /* dest */
1926                                 item->ri_buf[i].i_addr,         /* source */
1927                                 nbits<<XFS_BLI_SHIFT);          /* length */
1928                 i++;
1929                 bit += nbits;
1930         }
1931
1932         /* Shouldn't be any more regions */
1933         ASSERT(i == item->ri_total);
1934 }
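/*
 * Editorial sketch (hypothetical stand-ins for xfs_next_bit(),
 * xfs_contig_bits() and XFS_BLI_SHIFT): the dirty-bitmap walk used
 * above.  Each set bit marks one chunk of the buffer as logged and
 * runs of set bits are copied as single regions.  In the real code
 * each run's source is a separate logged region; here source and
 * destination share offsets for brevity.
 */
#include <string.h>

#define EXAMPLE_CHUNK_SHIFT	7	/* 128-byte chunks */

static int
example_next_bit(const unsigned int *map, int map_bits, int start)
{
	int i;

	for (i = start; i < map_bits; i++)
		if (map[i / 32] & (1u << (i % 32)))
			return i;
	return -1;
}

static void
example_replay_regions(char *dst, const char *src,
		       const unsigned int *map, int map_bits)
{
	int bit = 0;

	while ((bit = example_next_bit(map, map_bits, bit)) != -1) {
		int nbits = 1;

		/* measure the contiguous run of set bits */
		while (example_next_bit(map, map_bits, bit + nbits) ==
		       bit + nbits)
			nbits++;
		memcpy(dst + ((size_t)bit << EXAMPLE_CHUNK_SHIFT),
		       src + ((size_t)bit << EXAMPLE_CHUNK_SHIFT),
		       (size_t)nbits << EXAMPLE_CHUNK_SHIFT);
		bit += nbits;
	}
}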
1935
1936 /*
1937  * Do some primitive error checking on ondisk dquot data structures.
1938  */
1939 int
1940 xfs_qm_dqcheck(
1941         xfs_disk_dquot_t *ddq,
1942         xfs_dqid_t       id,
1943         uint             type,    /* used only when XFS_QMOPT_DQREPAIR is set */
1944         uint             flags,
1945         char             *str)
1946 {
1947         xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
1948         int             errs = 0;
1949
1950         /*
1951          * We can encounter an uninitialized dquot buffer for 2 reasons:
1952          * 1. If we crash while deleting the quotainode(s), and those blks got
1953          *    used for user data. This is because we take the path of regular
1954          *    file deletion; however, the size field of quotainodes is never
1955          *    updated, so all the tricks that we play in itruncate_finish
1956          *    don't quite matter.
1957          *
1958          * 2. We don't replay the quota buffers when there's a quotaoff logitem.
1959          *    But the allocation will be replayed so we'll end up with an
1960          *    uninitialized quota block.
1961          *
1962          * This is all fine; things are still consistent, and we haven't lost
1963          * any quota information. Just don't complain about bad dquot blks.
1964          */
1965         if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
1966                 if (flags & XFS_QMOPT_DOWARN)
1967                         cmn_err(CE_ALERT,
1968                         "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
1969                         str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
1970                 errs++;
1971         }
1972         if (ddq->d_version != XFS_DQUOT_VERSION) {
1973                 if (flags & XFS_QMOPT_DOWARN)
1974                         cmn_err(CE_ALERT,
1975                         "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
1976                         str, id, ddq->d_version, XFS_DQUOT_VERSION);
1977                 errs++;
1978         }
1979
1980         if (ddq->d_flags != XFS_DQ_USER &&
1981             ddq->d_flags != XFS_DQ_PROJ &&
1982             ddq->d_flags != XFS_DQ_GROUP) {
1983                 if (flags & XFS_QMOPT_DOWARN)
1984                         cmn_err(CE_ALERT,
1985                         "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
1986                         str, id, ddq->d_flags);
1987                 errs++;
1988         }
1989
1990         if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
1991                 if (flags & XFS_QMOPT_DOWARN)
1992                         cmn_err(CE_ALERT,
1993                         "%s : ondisk-dquot 0x%p, ID mismatch: "
1994                         "0x%x expected, found id 0x%x",
1995                         str, ddq, id, be32_to_cpu(ddq->d_id));
1996                 errs++;
1997         }
1998
1999         if (!errs && ddq->d_id) {
2000                 if (ddq->d_blk_softlimit &&
2001                     be64_to_cpu(ddq->d_bcount) >=
2002                                 be64_to_cpu(ddq->d_blk_softlimit)) {
2003                         if (!ddq->d_btimer) {
2004                                 if (flags & XFS_QMOPT_DOWARN)
2005                                         cmn_err(CE_ALERT,
2006                                         "%s : Dquot ID 0x%x (0x%p) "
2007                                         "BLK TIMER NOT STARTED",
2008                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2009                                 errs++;
2010                         }
2011                 }
2012                 if (ddq->d_ino_softlimit &&
2013                     be64_to_cpu(ddq->d_icount) >=
2014                                 be64_to_cpu(ddq->d_ino_softlimit)) {
2015                         if (!ddq->d_itimer) {
2016                                 if (flags & XFS_QMOPT_DOWARN)
2017                                         cmn_err(CE_ALERT,
2018                                         "%s : Dquot ID 0x%x (0x%p) "
2019                                         "INODE TIMER NOT STARTED",
2020                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2021                                 errs++;
2022                         }
2023                 }
2024                 if (ddq->d_rtb_softlimit &&
2025                     be64_to_cpu(ddq->d_rtbcount) >=
2026                                 be64_to_cpu(ddq->d_rtb_softlimit)) {
2027                         if (!ddq->d_rtbtimer) {
2028                                 if (flags & XFS_QMOPT_DOWARN)
2029                                         cmn_err(CE_ALERT,
2030                                         "%s : Dquot ID 0x%x (0x%p) "
2031                                         "RTBLK TIMER NOT STARTED",
2032                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2033                                 errs++;
2034                         }
2035                 }
2036         }
2037
2038         if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2039                 return errs;
2040
2041         if (flags & XFS_QMOPT_DOWARN)
2042                 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2043
2044         /*
2045          * Typically, a repair is only requested by quotacheck.
2046          */
2047         ASSERT(id != -1);
2048         ASSERT(flags & XFS_QMOPT_DQREPAIR);
2049         memset(d, 0, sizeof(xfs_dqblk_t));
2050
2051         d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2052         d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2053         d->dd_diskdq.d_flags = type;
2054         d->dd_diskdq.d_id = cpu_to_be32(id);
2055
2056         return errs;
2057 }
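/*
 * Editorial sketch (portable stand-in for be16_to_cpu(); the magic
 * value is shown for illustration only): the endian-safe check the
 * verifier above starts with.  On-disk dquot fields are big-endian
 * and must be converted before comparison with host-order constants.
 */
#include <stdint.h>

#define EXAMPLE_DQUOT_MAGIC	0x4451	/* "DQ" */

static uint16_t
example_be16_to_cpu(const unsigned char b[2])
{
	return (uint16_t)((b[0] << 8) | b[1]);	/* big-endian bytes to host */
}

static int
example_magic_ok(const unsigned char *disk_dquot)
{
	return example_be16_to_cpu(disk_dquot) == EXAMPLE_DQUOT_MAGIC;
}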
2058
2059 /*
2060  * Perform a dquot buffer recovery.
2061  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2062  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2063  * Else, treat it as a regular buffer and do recovery.
2064  */
2065 STATIC void
2066 xlog_recover_do_dquot_buffer(
2067         xfs_mount_t             *mp,
2068         xlog_t                  *log,
2069         xlog_recover_item_t     *item,
2070         xfs_buf_t               *bp,
2071         xfs_buf_log_format_t    *buf_f)
2072 {
2073         uint                    type;
2074
2075         /*
2076          * Filesystems are required to send in quota flags at mount time.
2077          */
2078         if (mp->m_qflags == 0) {
2079                 return;
2080         }
2081
2082         type = 0;
2083         if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
2084                 type |= XFS_DQ_USER;
2085         if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
2086                 type |= XFS_DQ_PROJ;
2087         if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
2088                 type |= XFS_DQ_GROUP;
2089         /*
2090          * This type of quota was turned off, so ignore this buffer
2091          */
2092         if (log->l_quotaoffs_flag & type)
2093                 return;
2094
2095         xlog_recover_do_reg_buffer(item, bp, buf_f);
2096 }
2097
2098 /*
2099  * This routine replays a modification made to a buffer at runtime.
2100  * There are actually two types of buffer, regular and inode.  Inode
2101  * buffers are handled differently in that we only recover a specific
2102  * set of data from them, namely
2103  * the inode di_next_unlinked fields.  This is because all other inode
2104  * data is actually logged via inode records and any data we replay
2105  * here which overlaps that may be stale.
2106  *
2107  * When meta-data buffers are freed at run time we log a buffer item
2108  * with the XFS_BLI_CANCEL bit set to indicate that previous copies
2109  * of the buffer in the log should not be replayed at recovery time.
2110  * This is so that if the blocks covered by the buffer are reused for
2111  * file data before we crash we don't end up replaying old, freed
2112  * meta-data into a user's file.
2113  *
2114  * To handle the cancellation of buffer log items, we make two passes
2115  * over the log during recovery.  During the first we build a table of
2116  * those buffers which have been cancelled, and during the second we
2117  * only replay those buffers which do not have corresponding cancel
2118  * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2119  * for more details on the implementation of the table of cancel records.
2120  */
2121 STATIC int
2122 xlog_recover_do_buffer_trans(
2123         xlog_t                  *log,
2124         xlog_recover_item_t     *item,
2125         int                     pass)
2126 {
2127         xfs_buf_log_format_t    *buf_f;
2128         xfs_mount_t             *mp;
2129         xfs_buf_t               *bp;
2130         int                     error;
2131         int                     cancel;
2132         xfs_daddr_t             blkno;
2133         int                     len;
2134         ushort                  flags;
2135
2136         buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
2137
2138         if (pass == XLOG_RECOVER_PASS1) {
2139                 /*
2140                  * In this pass we're only looking for buf items
2141                  * with the XFS_BLI_CANCEL bit set.
2142                  */
2143                 xlog_recover_do_buffer_pass1(log, buf_f);
2144                 return 0;
2145         } else {
2146                 /*
2147                  * In this pass we want to recover all the buffers
2148                  * which have not been cancelled and are not
2149                  * cancellation buffers themselves.  The routine
2150                  * we call here will tell us whether or not to
2151                  * continue with the replay of this buffer.
2152                  */
2153                 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2154                 if (cancel) {
2155                         return 0;
2156                 }
2157         }
2158         switch (buf_f->blf_type) {
2159         case XFS_LI_BUF:
2160                 blkno = buf_f->blf_blkno;
2161                 len = buf_f->blf_len;
2162                 flags = buf_f->blf_flags;
2163                 break;
2164         default:
2165                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2166                         "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2167                         buf_f->blf_type, log->l_mp->m_logname ?
2168                         log->l_mp->m_logname : "internal");
2169                 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2170                                  XFS_ERRLEVEL_LOW, log->l_mp);
2171                 return XFS_ERROR(EFSCORRUPTED);
2172         }
2173
2174         mp = log->l_mp;
2175         if (flags & XFS_BLI_INODE_BUF) {
2176                 bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
2177                                                                 XFS_BUF_LOCK);
2178         } else {
2179                 bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
2180         }
2181         if (XFS_BUF_ISERROR(bp)) {
2182                 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2183                                   bp, blkno);
2184                 error = XFS_BUF_GETERROR(bp);
2185                 xfs_buf_relse(bp);
2186                 return error;
2187         }
2188
2189         error = 0;
2190         if (flags & XFS_BLI_INODE_BUF) {
2191                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2192         } else if (flags &
2193                   (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
2194                 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2195         } else {
2196                 xlog_recover_do_reg_buffer(item, bp, buf_f);
2197         }
2198         if (error)
2199                 return XFS_ERROR(error);
2200
2201         /*
2202          * Perform delayed write on the buffer.  Asynchronous writes will be
2203          * slower when taking into account all the buffers to be flushed.
2204          *
2205          * Also make sure that only inode buffers with good sizes stay in
2206          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2207          * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2208          * buffers in the log can be a different size if the log was generated
2209          * by an older kernel using unclustered inode buffers or a newer kernel
2210          * running with a different inode cluster size.  Regardless, if
2211          * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2212          * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2213          * the buffer out of the buffer cache so that the buffer won't
2214          * overlap with future reads of those inodes.
2215          */
2216         if (XFS_DINODE_MAGIC ==
2217             be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2218             (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2219                         (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2220                 XFS_BUF_STALE(bp);
2221                 error = xfs_bwrite(mp, bp);
2222         } else {
2223                 ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2224                 bp->b_mount = mp;
2225                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2226                 xfs_bdwrite(mp, bp);
2227         }
2228
2229         return (error);
2230 }
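/*
 * Editorial sketch (hypothetical names): the size check at the end of
 * the function above.  A recovered inode buffer stays in the cache
 * only if its size matches MAX(block size, inode cluster size) for
 * the running kernel; anything else is written out stale so future
 * inode reads cannot alias it.
 */
#define EXAMPLE_MAX(a, b)	((a) > (b) ? (a) : (b))

static int
example_keep_inode_buffer_cached(unsigned int buf_bytes,
				 unsigned int blocksize,
				 unsigned int inode_cluster_size)
{
	return buf_bytes == EXAMPLE_MAX(blocksize, inode_cluster_size);
}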
2231
2232 STATIC int
2233 xlog_recover_do_inode_trans(
2234         xlog_t                  *log,
2235         xlog_recover_item_t     *item,
2236         int                     pass)
2237 {
2238         xfs_inode_log_format_t  *in_f;
2239         xfs_mount_t             *mp;
2240         xfs_buf_t               *bp;
2241         xfs_dinode_t            *dip;
2242         xfs_ino_t               ino;
2243         int                     len;
2244         xfs_caddr_t             src;
2245         xfs_caddr_t             dest;
2246         int                     error;
2247         int                     attr_index;
2248         uint                    fields;
2249         xfs_icdinode_t          *dicp;
2250         int                     need_free = 0;
2251
2252         if (pass == XLOG_RECOVER_PASS1) {
2253                 return 0;
2254         }
2255
2256         if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2257                 in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
2258         } else {
2259                 in_f = (xfs_inode_log_format_t *)kmem_alloc(
2260                         sizeof(xfs_inode_log_format_t), KM_SLEEP);
2261                 need_free = 1;
2262                 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2263                 if (error)
2264                         goto error;
2265         }
2266         ino = in_f->ilf_ino;
2267         mp = log->l_mp;
2268
2269         /*
2270          * Inode buffers can be freed; watch out for that case,
2271          * and do not replay the inode.
2272          */
2273         if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2274                                         in_f->ilf_len, 0)) {
2275                 error = 0;
2276                 goto error;
2277         }
2278
2279         bp = xfs_buf_read_flags(mp->m_ddev_targp, in_f->ilf_blkno,
2280                                 in_f->ilf_len, XFS_BUF_LOCK);
2281         if (XFS_BUF_ISERROR(bp)) {
2282                 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2283                                   bp, in_f->ilf_blkno);
2284                 error = XFS_BUF_GETERROR(bp);
2285                 xfs_buf_relse(bp);
2286                 goto error;
2287         }
2288         error = 0;
2289         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2290         dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2291
2292         /*
2293          * Make sure the place we're flushing out to really looks
2294          * like an inode!
2295          */
2296         if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
2297                 xfs_buf_relse(bp);
2298                 xfs_fs_cmn_err(CE_ALERT, mp,
2299                         "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2300                         dip, bp, ino);
2301                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2302                                  XFS_ERRLEVEL_LOW, mp);
2303                 error = EFSCORRUPTED;
2304                 goto error;
2305         }
2306         dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);
2307         if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2308                 xfs_buf_relse(bp);
2309                 xfs_fs_cmn_err(CE_ALERT, mp,
2310                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2311                         item, ino);
2312                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2313                                  XFS_ERRLEVEL_LOW, mp);
2314                 error = EFSCORRUPTED;
2315                 goto error;
2316         }
2317
2318         /* Skip replay when the on disk inode is newer than the log one */
2319         if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2320                 /*
2321                  * Deal with the wrap case: the counter wraps at
2322                  * DI_MAX_FLUSH, so small log values are actually newer
2323                  */
2324                 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2325                     dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2326                         /* do nothing */
2327                 } else {
2328                         xfs_buf_relse(bp);
2329                         error = 0;
2330                         goto error;
2331                 }
2332         }
2333         /* Take the opportunity to reset the flush iteration count */
2334         dicp->di_flushiter = 0;
2335
2336         if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2337                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2338                     (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2339                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2340                                          XFS_ERRLEVEL_LOW, mp, dicp);
2341                         xfs_buf_relse(bp);
2342                         xfs_fs_cmn_err(CE_ALERT, mp,
2343                                 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2344                                 item, dip, bp, ino);
2345                         error = EFSCORRUPTED;
2346                         goto error;
2347                 }
2348         } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2349                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2350                     (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2351                     (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2352                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2353                                              XFS_ERRLEVEL_LOW, mp, dicp);
2354                         xfs_buf_relse(bp);
2355                         xfs_fs_cmn_err(CE_ALERT, mp,
2356                                 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2357                                 item, dip, bp, ino);
2358                         error = EFSCORRUPTED;
2359                         goto error;
2360                 }
2361         }
2362         if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2363                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2364                                      XFS_ERRLEVEL_LOW, mp, dicp);
2365                 xfs_buf_relse(bp);
2366                 xfs_fs_cmn_err(CE_ALERT, mp,
2367                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2368                         item, dip, bp, ino,
2369                         dicp->di_nextents + dicp->di_anextents,
2370                         dicp->di_nblocks);
2371                 error = EFSCORRUPTED;
2372                 goto error;
2373         }
2374         if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2375                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2376                                      XFS_ERRLEVEL_LOW, mp, dicp);
2377                 xfs_buf_relse(bp);
2378                 xfs_fs_cmn_err(CE_ALERT, mp,
2379                         "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2380                         item, dip, bp, ino, dicp->di_forkoff);
2381                 error = EFSCORRUPTED;
2382                 goto error;
2383         }
2384         if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2385                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2386                                      XFS_ERRLEVEL_LOW, mp, dicp);
2387                 xfs_buf_relse(bp);
2388                 xfs_fs_cmn_err(CE_ALERT, mp,
2389                         "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2390                         item->ri_buf[1].i_len, item);
2391                 error = EFSCORRUPTED;
2392                 goto error;
2393         }
2394
2395         /* The core is in in-core format */
2396         xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr);
2397
2398         /* the rest is in on-disk format */
2399         if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2400                 memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2401                         item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2402                         item->ri_buf[1].i_len  - sizeof(struct xfs_icdinode));
2403         }
2404
2405         fields = in_f->ilf_fields;
2406         switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2407         case XFS_ILOG_DEV:
2408                 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2409                 break;
2410         case XFS_ILOG_UUID:
2411                 memcpy(XFS_DFORK_DPTR(dip),
2412                        &in_f->ilf_u.ilfu_uuid,
2413                        sizeof(uuid_t));
2414                 break;
2415         }
2416
2417         if (in_f->ilf_size == 2)
2418                 goto write_inode_buffer;
2419         len = item->ri_buf[2].i_len;
2420         src = item->ri_buf[2].i_addr;
2421         ASSERT(in_f->ilf_size <= 4);
2422         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2423         ASSERT(!(fields & XFS_ILOG_DFORK) ||
2424                (len == in_f->ilf_dsize));
2425
2426         switch (fields & XFS_ILOG_DFORK) {
2427         case XFS_ILOG_DDATA:
2428         case XFS_ILOG_DEXT:
2429                 memcpy(XFS_DFORK_DPTR(dip), src, len);
2430                 break;
2431
2432         case XFS_ILOG_DBROOT:
2433                 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2434                                  (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2435                                  XFS_DFORK_DSIZE(dip, mp));
2436                 break;
2437
2438         default:
2439                 /*
2440                  * There are no data fork flags set.
2441                  */
2442                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2443                 break;
2444         }
2445
2446         /*
2447          * If we logged any attribute data, recover it.  There may or
2448          * may not have been any other non-core data logged in this
2449          * transaction.
2450          */
2451         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2452                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2453                         attr_index = 3;
2454                 } else {
2455                         attr_index = 2;
2456                 }
2457                 len = item->ri_buf[attr_index].i_len;
2458                 src = item->ri_buf[attr_index].i_addr;
2459                 ASSERT(len == in_f->ilf_asize);
2460
2461                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2462                 case XFS_ILOG_ADATA:
2463                 case XFS_ILOG_AEXT:
2464                         dest = XFS_DFORK_APTR(dip);
2465                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2466                         memcpy(dest, src, len);
2467                         break;
2468
2469                 case XFS_ILOG_ABROOT:
2470                         dest = XFS_DFORK_APTR(dip);
2471                         xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2472                                          len, (xfs_bmdr_block_t*)dest,
2473                                          XFS_DFORK_ASIZE(dip, mp));
2474                         break;
2475
2476                 default:
2477                         xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2478                         ASSERT(0);
2479                         xfs_buf_relse(bp);
2480                         error = EIO;
2481                         goto error;
2482                 }
2483         }
2484
2485 write_inode_buffer:
2486         if (ITEM_TYPE(item) == XFS_LI_INODE) {
2487                 ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2488                 bp->b_mount = mp;
2489                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2490                 xfs_bdwrite(mp, bp);
2491         } else {
2492                 XFS_BUF_STALE(bp);
2493                 error = xfs_bwrite(mp, bp);
2494         }
2495
2496 error:
2497         if (need_free)
2498                 kmem_free(in_f);
2499         return XFS_ERROR(error);
2500 }
2501
2502 /*
2503  * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2504  * structure, so that we know not to do any dquot item or dquot buffer
2505  * recovery of that type.
2506  */
2507 STATIC int
2508 xlog_recover_do_quotaoff_trans(
2509         xlog_t                  *log,
2510         xlog_recover_item_t     *item,
2511         int                     pass)
2512 {
2513         xfs_qoff_logformat_t    *qoff_f;
2514
2515         if (pass == XLOG_RECOVER_PASS2) {
2516                 return (0);
2517         }
2518
2519         qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
2520         ASSERT(qoff_f);
2521
2522         /*
2523          * The logitem format's flag tells us if this was user quotaoff,
2524          * group/project quotaoff or both.
2525          */
2526         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2527                 log->l_quotaoffs_flag |= XFS_DQ_USER;
2528         if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2529                 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2530         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2531                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2532
2533         return (0);
2534 }
2535
2536 /*
2537  * Recover a dquot record
2538  */
2539 STATIC int
2540 xlog_recover_do_dquot_trans(
2541         xlog_t                  *log,
2542         xlog_recover_item_t     *item,
2543         int                     pass)
2544 {
2545         xfs_mount_t             *mp;
2546         xfs_buf_t               *bp;
2547         struct xfs_disk_dquot   *ddq, *recddq;
2548         int                     error;
2549         xfs_dq_logformat_t      *dq_f;
2550         uint                    type;
2551
2552         if (pass == XLOG_RECOVER_PASS1) {
2553                 return 0;
2554         }
2555         mp = log->l_mp;
2556
2557         /*
2558          * Filesystems are required to send in quota flags at mount time.
2559          */
2560         if (mp->m_qflags == 0)
2561                 return (0);
2562
2563         recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
2564         ASSERT(recddq);
2565         /*
2566          * This type of quota was turned off, so ignore this record.
2567          */
2568         type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2569         ASSERT(type);
2570         if (log->l_quotaoffs_flag & type)
2571                 return (0);
2572
2573         /*
2574          * At this point we know that quota was _not_ turned off.
2575          * Since the mount flags do not indicate otherwise, this
2576          * must mean that quota is on, and the dquot needs to be replayed.
2577          * Remember that we may not have fully recovered the superblock yet,
2578          * so we can't do the usual trick of looking at the SB quota bits.
2579          *
2580          * The other possibility, of course, is that the quota subsystem was
2581          * removed since the last mount - ENOSYS.
2582          */
2583         dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
2584         ASSERT(dq_f);
2585         if ((error = xfs_qm_dqcheck(recddq,
2586                            dq_f->qlf_id,
2587                            0, XFS_QMOPT_DOWARN,
2588                            "xlog_recover_do_dquot_trans (log copy)"))) {
2589                 return XFS_ERROR(EIO);
2590         }
2591         ASSERT(dq_f->qlf_len == 1);
2592
2593         error = xfs_read_buf(mp, mp->m_ddev_targp,
2594                              dq_f->qlf_blkno,
2595                              XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2596                              0, &bp);
2597         if (error) {
2598                 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2599                                   bp, dq_f->qlf_blkno);
2600                 return error;
2601         }
2602         ASSERT(bp);
2603         ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2604
2605         /*
2606          * At least the magic num portion should be on disk because this
2607          * was among a chunk of dquots created earlier, and we did some
2608          * minimal initialization then.
2609          */
2610         if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2611                            "xlog_recover_do_dquot_trans")) {
2612                 xfs_buf_relse(bp);
2613                 return XFS_ERROR(EIO);
2614         }
2615
2616         memcpy(ddq, recddq, item->ri_buf[1].i_len);
2617
2618         ASSERT(dq_f->qlf_size == 2);
2619         ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2620         bp->b_mount = mp;
2621         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2622         xfs_bdwrite(mp, bp);
2623
2624         return (0);
2625 }
2626
2627 /*
2628  * This routine is called to create an in-core extent free intent
2629  * item from the efi format structure which was logged on disk.
2630  * It allocates an in-core efi, copies the extents from the format
2631  * structure into it, and adds the efi to the AIL with the given
2632  * LSN.
2633  */
2634 STATIC int
2635 xlog_recover_do_efi_trans(
2636         xlog_t                  *log,
2637         xlog_recover_item_t     *item,
2638         xfs_lsn_t               lsn,
2639         int                     pass)
2640 {
2641         int                     error;
2642         xfs_mount_t             *mp;
2643         xfs_efi_log_item_t      *efip;
2644         xfs_efi_log_format_t    *efi_formatp;
2645
2646         if (pass == XLOG_RECOVER_PASS1) {
2647                 return 0;
2648         }
2649
2650         efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
2651
2652         mp = log->l_mp;
2653         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2654         if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2655                                          &(efip->efi_format)))) {
2656                 xfs_efi_item_free(efip);
2657                 return error;
2658         }
2659         efip->efi_next_extent = efi_formatp->efi_nextents;
2660         efip->efi_flags |= XFS_EFI_COMMITTED;
2661
2662         spin_lock(&log->l_ailp->xa_lock);
2663         /*
2664          * xfs_trans_ail_update() drops the AIL lock.
2665          */
2666         xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
2667         return 0;
2668 }
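
/*
 * Note (added commentary, not from the original source): the EFI
 * inserted above is marked XFS_EFI_COMMITTED and parked in the AIL at
 * the commit lsn.  It stays there until either a matching EFD is
 * replayed (xlog_recover_do_efd_trans() below removes it) or, after
 * both recovery passes finish, xlog_recover_process_efis() performs
 * the pending extent frees.
 */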
2669
2670
2671 /*
2672  * This routine is called when an efd format structure is found in
2673  * a committed transaction in the log.  Its purpose is to cancel
2674  * the corresponding efi if it was still in the log.  To do this
2675  * it searches the AIL for the efi with an id equal to that in the
2676  * efd format structure.  If we find it, we remove the efi from the
2677  * AIL and free it.
2678  */
2679 STATIC void
2680 xlog_recover_do_efd_trans(
2681         xlog_t                  *log,
2682         xlog_recover_item_t     *item,
2683         int                     pass)
2684 {
2685         xfs_efd_log_format_t    *efd_formatp;
2686         xfs_efi_log_item_t      *efip = NULL;
2687         xfs_log_item_t          *lip;
2688         __uint64_t              efi_id;
2689         struct xfs_ail_cursor   cur;
2690         struct xfs_ail          *ailp = log->l_ailp;
2691
2692         if (pass == XLOG_RECOVER_PASS1) {
2693                 return;
2694         }
2695
2696         efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
2697         ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2698                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2699                (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2700                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2701         efi_id = efd_formatp->efd_efi_id;
2702
2703         /*
2704          * Search for the efi with the id in the efd format structure
2705          * in the AIL.
2706          */
2707         spin_lock(&ailp->xa_lock);
2708         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2709         while (lip != NULL) {
2710                 if (lip->li_type == XFS_LI_EFI) {
2711                         efip = (xfs_efi_log_item_t *)lip;
2712                         if (efip->efi_format.efi_id == efi_id) {
2713                                 /*
2714                                  * xfs_trans_ail_delete() drops the
2715                                  * AIL lock.
2716                                  */
2717                                 xfs_trans_ail_delete(ailp, lip);
2718                                 xfs_efi_item_free(efip);
2719                                 spin_lock(&ailp->xa_lock);
2720                                 break;
2721                         }
2722                 }
2723                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2724         }
2725         xfs_trans_ail_cursor_done(ailp, &cur);
2726         spin_unlock(&ailp->xa_lock);
2727 }
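
/*
 * Illustrative sketch (hypothetical helper, not in the original
 * source): the EFD search above follows the generic AIL cursor walk.
 * Any scan for a particular item type takes the same shape; note that
 * the cursor is torn down and xa_lock dropped on every exit path:
 */
#if 0
STATIC xfs_log_item_t *
xlog_ail_find_type(
	struct xfs_ail	*ailp,
	uint		type)
{
	struct xfs_ail_cursor	cur;
	xfs_log_item_t		*lip;

	spin_lock(&ailp->xa_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	     lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		if (lip->li_type == type)
			break;
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);
	return lip;			/* NULL if no match */
}
#endif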
2728
2729 /*
2730  * Perform the transaction
2731  *
2732  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2733  * EFIs and EFDs get queued up by adding entries into the AIL for them.
2734  */
2735 STATIC int
2736 xlog_recover_do_trans(
2737         xlog_t                  *log,
2738         xlog_recover_t          *trans,
2739         int                     pass)
2740 {
2741         int                     error = 0;
2742         xlog_recover_item_t     *item, *first_item;
2743
2744         if ((error = xlog_recover_reorder_trans(trans)))
2745                 return error;
2746         first_item = item = trans->r_itemq;
2747         do {
2748                 /*
2749                  * We don't need to worry about the block number being
2750                  * truncated in > 1 TB buffers because in user-land
2751                  * we're now n32 or 64-bit, so xfs_daddr_t is 64 bits and
2752                  * the blknos will get through the user-mode buffer
2753                  * cache properly.  The only bad case is o32 kernels,
2754                  * where xfs_daddr_t is 32 bits, but mount will warn us
2755                  * off a > 1 TB filesystem before we get here.
2756                  */
2757                 if (ITEM_TYPE(item) == XFS_LI_BUF) {
2758                         if ((error = xlog_recover_do_buffer_trans(log, item,
2759                                                                  pass)))
2760                                 break;
2761                 } else if (ITEM_TYPE(item) == XFS_LI_INODE) {
2762                         if ((error = xlog_recover_do_inode_trans(log, item,
2763                                                                 pass)))
2764                                 break;
2765                 } else if (ITEM_TYPE(item) == XFS_LI_EFI) {
2766                         if ((error = xlog_recover_do_efi_trans(log, item,
2767                                                         trans->r_lsn, pass)))
2768                                 break;
2769                 } else if (ITEM_TYPE(item) == XFS_LI_EFD) {
2770                         xlog_recover_do_efd_trans(log, item, pass);
2771                 } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
2772                         if ((error = xlog_recover_do_dquot_trans(log, item,
2773                                                                  pass)))
2774                                 break;
2775                 } else if (ITEM_TYPE(item) == XFS_LI_QUOTAOFF) {
2776                         if ((error = xlog_recover_do_quotaoff_trans(log, item,
2777                                                                     pass)))
2778                                 break;
2779                 } else {
2780                         xlog_warn("XFS: xlog_recover_do_trans: unknown item type");
2781                         ASSERT(0);
2782                         error = XFS_ERROR(EIO);
2783                         break;
2784                 }
2785                 item = item->ri_next;
2786         } while (first_item != item);
2787
2788         return error;
2789 }
2790
2791 /*
2792  * Free up any resources allocated by the transaction
2793  *
2794  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2795  */
2796 STATIC void
2797 xlog_recover_free_trans(
2798         xlog_recover_t          *trans)
2799 {
2800         xlog_recover_item_t     *first_item, *item, *free_item;
2801         int                     i;
2802
2803         item = first_item = trans->r_itemq;
2804         do {
2805                 free_item = item;
2806                 item = item->ri_next;
2807                 /* Free the regions in the item. */
2808                 for (i = 0; i < free_item->ri_cnt; i++) {
2809                         kmem_free(free_item->ri_buf[i].i_addr);
2810                 }
2811                 /* Free the item itself */
2812                 kmem_free(free_item->ri_buf);
2813                 kmem_free(free_item);
2814         } while (first_item != item);
2815         /* Free the transaction recover structure */
2816         kmem_free(trans);
2817 }
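
/*
 * Illustrative sketch (added commentary): r_itemq is a circular,
 * singly linked list, which is why both xlog_recover_do_trans() and
 * xlog_recover_free_trans() use the same do/while idiom.  Stripped to
 * its skeleton the traversal is:
 */
#if 0
	item = first_item = trans->r_itemq;
	do {
		/* use item before advancing; free_trans frees it here */
		item = item->ri_next;
	} while (item != first_item);
#endif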
2818
2819 STATIC int
2820 xlog_recover_commit_trans(
2821         xlog_t                  *log,
2822         xlog_recover_t          **q,
2823         xlog_recover_t          *trans,
2824         int                     pass)
2825 {
2826         int                     error;
2827
2828         if ((error = xlog_recover_unlink_tid(q, trans)))
2829                 return error;
2830         if ((error = xlog_recover_do_trans(log, trans, pass)))
2831                 return error;
2832         xlog_recover_free_trans(trans);                 /* no error */
2833         return 0;
2834 }
2835
2836 STATIC int
2837 xlog_recover_unmount_trans(
2838         xlog_recover_t          *trans)
2839 {
2840         /* Do nothing now */
2841         xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2842         return 0;
2843 }
2844
2845 /*
2846  * There are two valid states of the r_state field.  0 indicates that the
2847  * transaction structure is in a normal state.  We have either seen the
2848  * start of the transaction or the last operation we added was not a partial
2849  * operation.  If the last operation we added to the transaction was a
2850  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2851  *
2852  * NOTE: skip LRs with 0 data length.
2853  */
2854 STATIC int
2855 xlog_recover_process_data(
2856         xlog_t                  *log,
2857         xlog_recover_t          *rhash[],
2858         xlog_rec_header_t       *rhead,
2859         xfs_caddr_t             dp,
2860         int                     pass)
2861 {
2862         xfs_caddr_t             lp;
2863         int                     num_logops;
2864         xlog_op_header_t        *ohead;
2865         xlog_recover_t          *trans;
2866         xlog_tid_t              tid;
2867         int                     error;
2868         unsigned long           hash;
2869         uint                    flags;
2870
2871         lp = dp + be32_to_cpu(rhead->h_len);
2872         num_logops = be32_to_cpu(rhead->h_num_logops);
2873
2874         /* check the log format matches our own - else we can't recover */
2875         if (xlog_header_check_recover(log->l_mp, rhead))
2876                 return (XFS_ERROR(EIO));
2877
2878         while ((dp < lp) && num_logops) {
2879                 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2880                 ohead = (xlog_op_header_t *)dp;
2881                 dp += sizeof(xlog_op_header_t);
2882                 if (ohead->oh_clientid != XFS_TRANSACTION &&
2883                     ohead->oh_clientid != XFS_LOG) {
2884                         xlog_warn(
2885                 "XFS: xlog_recover_process_data: bad clientid");
2886                         ASSERT(0);
2887                         return (XFS_ERROR(EIO));
2888                 }
2889                 tid = be32_to_cpu(ohead->oh_tid);
2890                 hash = XLOG_RHASH(tid);
2891                 trans = xlog_recover_find_tid(rhash[hash], tid);
2892                 if (trans == NULL) {               /* not found; add new tid */
2893                         if (ohead->oh_flags & XLOG_START_TRANS)
2894                                 xlog_recover_new_tid(&rhash[hash], tid,
2895                                         be64_to_cpu(rhead->h_lsn));
2896                 } else {
2897                         if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2898                                 xlog_warn(
2899                         "XFS: xlog_recover_process_data: bad length");
2900                                 WARN_ON(1);
2901                                 return (XFS_ERROR(EIO));
2902                         }
2903                         flags = ohead->oh_flags & ~XLOG_END_TRANS;
2904                         if (flags & XLOG_WAS_CONT_TRANS)
2905                                 flags &= ~XLOG_CONTINUE_TRANS;
2906                         switch (flags) {
2907                         case XLOG_COMMIT_TRANS:
2908                                 error = xlog_recover_commit_trans(log,
2909                                                 &rhash[hash], trans, pass);
2910                                 break;
2911                         case XLOG_UNMOUNT_TRANS:
2912                                 error = xlog_recover_unmount_trans(trans);
2913                                 break;
2914                         case XLOG_WAS_CONT_TRANS:
2915                                 error = xlog_recover_add_to_cont_trans(trans,
2916                                                 dp, be32_to_cpu(ohead->oh_len));
2917                                 break;
2918                         case XLOG_START_TRANS:
2919                                 xlog_warn(
2920                         "XFS: xlog_recover_process_data: bad transaction");
2921                                 ASSERT(0);
2922                                 error = XFS_ERROR(EIO);
2923                                 break;
2924                         case 0:
2925                         case XLOG_CONTINUE_TRANS:
2926                                 error = xlog_recover_add_to_trans(trans,
2927                                                 dp, be32_to_cpu(ohead->oh_len));
2928                                 break;
2929                         default:
2930                                 xlog_warn(
2931                         "XFS: xlog_recover_process_data: bad flag");
2932                                 ASSERT(0);
2933                                 error = XFS_ERROR(EIO);
2934                                 break;
2935                         }
2936                         if (error)
2937                                 return error;
2938                 }
2939                 dp += be32_to_cpu(ohead->oh_len);
2940                 num_logops--;
2941         }
2942         return 0;
2943 }
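
/*
 * Summary of the dispatch above (added commentary, not from the
 * original source).  XLOG_END_TRANS is masked off first, and
 * XLOG_CONTINUE_TRANS is cleared whenever XLOG_WAS_CONT_TRANS is set,
 * so the switch sees a single state:
 *
 *	flags				action
 *	-----				------
 *	XLOG_START_TRANS		corruption: the tid is already known
 *	0 / XLOG_CONTINUE_TRANS		add a new region to the transaction
 *	XLOG_WAS_CONT_TRANS		append to the last, partial region
 *	XLOG_COMMIT_TRANS		unlink the tid, replay, free
 *	XLOG_UNMOUNT_TRANS		log a note and carry on
 */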
2944
2945 /*
2946  * Process an extent free intent item that was recovered from
2947  * the log.  We need to free the extents that it describes.
2948  */
2949 STATIC int
2950 xlog_recover_process_efi(
2951         xfs_mount_t             *mp,
2952         xfs_efi_log_item_t      *efip)
2953 {
2954         xfs_efd_log_item_t      *efdp;
2955         xfs_trans_t             *tp;
2956         int                     i;
2957         int                     error = 0;
2958         xfs_extent_t            *extp;
2959         xfs_fsblock_t           startblock_fsb;
2960
2961         ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
2962
2963         /*
2964          * First check the validity of the extents described by the
2965          * EFI.  If any are bad, then assume that all are bad and
2966          * just toss the EFI.
2967          */
2968         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2969                 extp = &(efip->efi_format.efi_extents[i]);
2970                 startblock_fsb = XFS_BB_TO_FSB(mp,
2971                                    XFS_FSB_TO_DADDR(mp, extp->ext_start));
2972                 if ((startblock_fsb == 0) ||
2973                     (extp->ext_len == 0) ||
2974                     (startblock_fsb >= mp->m_sb.sb_dblocks) ||
2975                     (extp->ext_len >= mp->m_sb.sb_agblocks)) {
2976                         /*
2977                          * This will pull the EFI from the AIL and
2978                          * free the memory associated with it.
2979                          */
2980                         xfs_efi_release(efip, efip->efi_format.efi_nextents);
2981                         return XFS_ERROR(EIO);
2982                 }
2983         }
2984
2985         tp = xfs_trans_alloc(mp, 0);
2986         error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
2987         if (error)
2988                 goto abort_error;
2989         efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
2990
2991         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
2992                 extp = &(efip->efi_format.efi_extents[i]);
2993                 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
2994                 if (error)
2995                         goto abort_error;
2996                 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
2997                                          extp->ext_len);
2998         }
2999
3000         efip->efi_flags |= XFS_EFI_RECOVERED;
3001         error = xfs_trans_commit(tp, 0);
3002         return error;
3003
3004 abort_error:
3005         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3006         return error;
3007 }
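
/*
 * Worked example of the sanity check above (added commentary,
 * hypothetical numbers): XFS_FSB_TO_DADDR() scales ext_start up to a
 * 512-byte disk address and XFS_BB_TO_FSB() scales it back, so a sane
 * start block round-trips to itself.  A garbled ext_start either
 * wraps to a startblock_fsb of 0 or lands at or beyond sb_dblocks;
 * either way the whole EFI is tossed rather than risking a free of a
 * bogus extent.
 */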
3008
3009 /*
3010  * When this is called, all of the EFIs which did not have
3011  * corresponding EFDs should be in the AIL.  What we do now
3012  * is free the extents associated with each one.
3013  *
3014  * Since we process the EFIs in normal transactions, they
3015  * will be removed at some point after the commit.  This prevents
3016  * us from just walking down the list processing each one.
3017  * We'll use a flag in the EFI to skip those that we've already
3018  * processed and use the AIL iteration mechanism's generation
3019  * count to try to speed this up at least a bit.
3020  *
3021  * When we start, we know that the EFIs are the only things in
3022  * the AIL.  As we process them, however, other items are added
3023  * to the AIL.  Since everything added to the AIL must come after
3024  * everything already in the AIL, we stop processing as soon as
3025  * we see something other than an EFI in the AIL.
3026  */
3027 STATIC int
3028 xlog_recover_process_efis(
3029         xlog_t                  *log)
3030 {
3031         xfs_log_item_t          *lip;
3032         xfs_efi_log_item_t      *efip;
3033         int                     error = 0;
3034         struct xfs_ail_cursor   cur;
3035         struct xfs_ail          *ailp;
3036
3037         ailp = log->l_ailp;
3038         spin_lock(&ailp->xa_lock);
3039         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3040         while (lip != NULL) {
3041                 /*
3042                  * We're done when we see something other than an EFI.
3043                  * There should be no EFIs left in the AIL now.
3044                  */
3045                 if (lip->li_type != XFS_LI_EFI) {
3046 #ifdef DEBUG
3047                         for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3048                                 ASSERT(lip->li_type != XFS_LI_EFI);
3049 #endif
3050                         break;
3051                 }
3052
3053                 /*
3054                  * Skip EFIs that we've already processed.
3055                  */
3056                 efip = (xfs_efi_log_item_t *)lip;
3057                 if (efip->efi_flags & XFS_EFI_RECOVERED) {
3058                         lip = xfs_trans_ail_cursor_next(ailp, &cur);
3059                         continue;
3060                 }
3061
3062                 spin_unlock(&ailp->xa_lock);
3063                 error = xlog_recover_process_efi(log->l_mp, efip);
3064                 spin_lock(&ailp->xa_lock);
3065                 if (error)
3066                         goto out;
3067                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3068         }
3069 out:
3070         xfs_trans_ail_cursor_done(ailp, &cur);
3071         spin_unlock(&ailp->xa_lock);
3072         return error;
3073 }
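
/*
 * Note on the locking above (added commentary): xa_lock is dropped
 * around xlog_recover_process_efi() because that function runs a real
 * transaction (reserve, free, commit) which itself manipulates the
 * AIL.  The cursor keeps the iteration position valid across the
 * unlock/relock window even if items are added or removed meanwhile.
 */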
3074
3075 /*
3076  * This routine performs a transaction to null out a bad inode pointer
3077  * in an agi unlinked inode hash bucket.
3078  */
3079 STATIC void
3080 xlog_recover_clear_agi_bucket(
3081         xfs_mount_t     *mp,
3082         xfs_agnumber_t  agno,
3083         int             bucket)
3084 {
3085         xfs_trans_t     *tp;
3086         xfs_agi_t       *agi;
3087         xfs_buf_t       *agibp;
3088         int             offset;
3089         int             error;
3090
3091         tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3092         error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3093                                   0, 0, 0);
3094         if (error)
3095                 goto out_abort;
3096
3097         error = xfs_read_agi(mp, tp, agno, &agibp);
3098         if (error)
3099                 goto out_abort;
3100
3101         agi = XFS_BUF_TO_AGI(agibp);
3102         agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3103         offset = offsetof(xfs_agi_t, agi_unlinked) +
3104                  (sizeof(xfs_agino_t) * bucket);
3105         xfs_trans_log_buf(tp, agibp, offset,
3106                           (offset + sizeof(xfs_agino_t) - 1));
3107
3108         error = xfs_trans_commit(tp, 0);
3109         if (error)
3110                 goto out_error;
3111         return;
3112
3113 out_abort:
3114         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3115 out_error:
3116         xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: "
3117                         "failed to clear agi %d. Continuing.", agno);
3118         return;
3119 }
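
/*
 * Worked example of the logged range above (added commentary): for
 * bucket 5, offset = offsetof(xfs_agi_t, agi_unlinked) +
 * 5 * sizeof(xfs_agino_t), and xfs_trans_log_buf() is handed the
 * inclusive byte range [offset, offset + sizeof(xfs_agino_t) - 1],
 * i.e. exactly the four bytes of the one bucket entry that was reset
 * to NULLAGINO.
 */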
3120
3121 STATIC xfs_agino_t
3122 xlog_recover_process_one_iunlink(
3123         struct xfs_mount                *mp,
3124         xfs_agnumber_t                  agno,
3125         xfs_agino_t                     agino,
3126         int                             bucket)
3127 {
3128         struct xfs_buf                  *ibp;
3129         struct xfs_dinode               *dip;
3130         struct xfs_inode                *ip;
3131         xfs_ino_t                       ino;
3132         int                             error;
3133
3134         ino = XFS_AGINO_TO_INO(mp, agno, agino);
3135         error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
3136         if (error)
3137                 goto fail;
3138
3139         /*
3140          * Get the on disk inode to find the next inode in the bucket.
3141          */
3142         error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK);
3143         if (error)
3144                 goto fail_iput;
3145
3146         ASSERT(ip->i_d.di_nlink == 0);
3147         ASSERT(ip->i_d.di_mode != 0);
3148
3149         /* setup for the next pass */
3150         agino = be32_to_cpu(dip->di_next_unlinked);
3151         xfs_buf_relse(ibp);
3152
3153         /*
3154          * Prevent any DMAPI event from being sent when the reference on
3155          * the inode is dropped.
3156          */
3157         ip->i_d.di_dmevmask = 0;
3158
3159         IRELE(ip);
3160         return agino;
3161
3162  fail_iput:
3163         IRELE(ip);
3164  fail:
3165         /*
3166          * We can't read in the inode this bucket points to, or this inode
3167          * is messed up.  Just ditch this bucket of inodes.  We will lose
3168          * some inodes and space, but at least we won't hang.
3169          *
3170          * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3171          * clear the inode pointer in the bucket.
3172          */
3173         xlog_recover_clear_agi_bucket(mp, agno, bucket);
3174         return NULLAGINO;
3175 }
3176
3177 /*
3178  * xlog_iunlink_recover
3179  *
3180  * This is called during recovery to process any inodes which
3181  * we unlinked but not freed when the system crashed.  These
3182  * inodes will be on the lists in the AGI blocks.  What we do
3183  * here is scan all the AGIs and fully truncate and free any
3184  * inodes found on the lists.  Each inode is removed from the
3185  * lists when it has been fully truncated and is freed.  The
3186  * freeing of the inode and its removal from the list must be
3187  * atomic.
3188  */
3189 void
3190 xlog_recover_process_iunlinks(
3191         xlog_t          *log)
3192 {
3193         xfs_mount_t     *mp;
3194         xfs_agnumber_t  agno;
3195         xfs_agi_t       *agi;
3196         xfs_buf_t       *agibp;
3197         xfs_agino_t     agino;
3198         int             bucket;
3199         int             error;
3200         uint            mp_dmevmask;
3201
3202         mp = log->l_mp;
3203
3204         /*
3205          * Prevent any DMAPI event from being sent while in this function.
3206          */
3207         mp_dmevmask = mp->m_dmevmask;
3208         mp->m_dmevmask = 0;
3209
3210         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3211                 /*
3212                  * Find the agi for this ag.
3213                  */
3214                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3215                 if (error) {
3216                         /*
3217                          * AGI is broken. Don't process it.
3218                          *
3219                          * We should probably mark the filesystem as corrupt
3220                          * after we've recovered all the AGs we can...
3221                          */
3222                         continue;
3223                 }
3224                 agi = XFS_BUF_TO_AGI(agibp);
3225
3226                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3227                         agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3228                         while (agino != NULLAGINO) {
3229                                 /*
3230                                  * Release the agi buffer so that it can
3231                                  * be acquired in the normal course of the
3232                                  * transaction to truncate and free the inode.
3233                                  */
3234                                 xfs_buf_relse(agibp);
3235
3236                                 agino = xlog_recover_process_one_iunlink(mp,
3237                                                         agno, agino, bucket);
3238
3239                                 /*
3240                                  * Reacquire the AGI buffer and continue around
3241                                  * the loop. This should never fail as we know
3242                                  * the buffer was good earlier on.
3243                                  */
3244                                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3245                                 ASSERT(error == 0);
3246                                 agi = XFS_BUF_TO_AGI(agibp);
3247                         }
3248                 }
3249
3250                 /*
3251                  * Release the buffer for the current agi so we can
3252                  * go on to the next one.
3253                  */
3254                 xfs_buf_relse(agibp);
3255         }
3256
3257         mp->m_dmevmask = mp_dmevmask;
3258 }
3259
3260
3261 #ifdef DEBUG
3262 STATIC void
3263 xlog_pack_data_checksum(
3264         xlog_t          *log,
3265         xlog_in_core_t  *iclog,
3266         int             size)
3267 {
3268         int             i;
3269         __be32          *up;
3270         uint            chksum = 0;
3271
3272         up = (__be32 *)iclog->ic_datap;
3273         /* divide length by 4 to get # words */
3274         for (i = 0; i < (size >> 2); i++) {
3275                 chksum ^= be32_to_cpu(*up);
3276                 up++;
3277         }
3278         iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3279 }
3280 #else
3281 #define xlog_pack_data_checksum(log, iclog, size)
3282 #endif
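
/*
 * Illustrative sketch (hypothetical helper): the DEBUG checksum above
 * is a plain 32-bit XOR over the record payload.  As a free-standing
 * function it would look like:
 */
#if 0
STATIC uint
xlog_xor_checksum(
	__be32		*up,
	int		size)	/* bytes, assumed a multiple of 4 */
{
	uint		chksum = 0;
	int		i;

	/* divide length by 4 to get # words */
	for (i = 0; i < (size >> 2); i++)
		chksum ^= be32_to_cpu(up[i]);
	return chksum;
}
#endif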
3283
3284 /*
3285  * Stamp cycle number in every block
3286  */
3287 void
3288 xlog_pack_data(
3289         xlog_t                  *log,
3290         xlog_in_core_t          *iclog,
3291         int                     roundoff)
3292 {
3293         int                     i, j, k;
3294         int                     size = iclog->ic_offset + roundoff;
3295         __be32                  cycle_lsn;
3296         xfs_caddr_t             dp;
3297
3298         xlog_pack_data_checksum(log, iclog, size);
3299
3300         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3301
3302         dp = iclog->ic_datap;
3303         for (i = 0; i < BTOBB(size) &&
3304                 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3305                 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3306                 *(__be32 *)dp = cycle_lsn;
3307                 dp += BBSIZE;
3308         }
3309
3310         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3311                 xlog_in_core_2_t *xhdr = iclog->ic_data;
3312
3313                 for ( ; i < BTOBB(size); i++) {
3314                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3315                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3316                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3317                         *(__be32 *)dp = cycle_lsn;
3318                         dp += BBSIZE;
3319                 }
3320
3321                 for (i = 1; i < log->l_iclog_heads; i++) {
3322                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3323                 }
3324         }
3325 }
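
/*
 * Worked example of the stamping above (added commentary):
 * h_cycle_data[] holds XLOG_HEADER_CYCLE_SIZE / BBSIZE = 64 saved
 * words, so the primary header covers the first 64 basic blocks of a
 * record.  For a 100-BB record on a v2 log, blocks 64..99 spill into
 * extended header xhdr[1] (j = i / 64) at slots k = i % 64 = 0..35.
 * With every 512-byte block beginning with the record's cycle number,
 * head finding can detect stale blocks purely by cycle mismatch.
 */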
3326
3327 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
3328 STATIC void
3329 xlog_unpack_data_checksum(
3330         xlog_rec_header_t       *rhead,
3331         xfs_caddr_t             dp,
3332         xlog_t                  *log)
3333 {
3334         __be32                  *up = (__be32 *)dp;
3335         uint                    chksum = 0;
3336         int                     i;
3337
3338         /* divide length by 4 to get # words */
3339         for (i = 0; i < be32_to_cpu(rhead->h_len) >> 2; i++) {
3340                 chksum ^= be32_to_cpu(*up);
3341                 up++;
3342         }
3343         if (chksum != be32_to_cpu(rhead->h_chksum)) {
3344             if (rhead->h_chksum ||
3345                 ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
3346                     cmn_err(CE_DEBUG,
3347                         "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n",
3348                             be32_to_cpu(rhead->h_chksum), chksum);
3349                     cmn_err(CE_DEBUG,
3350 "XFS: Disregard message if filesystem was created with non-DEBUG kernel");
3351                     if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3352                             cmn_err(CE_DEBUG,
3353                                 "XFS: LogR this is a LogV2 filesystem\n");
3354                     }
3355                     log->l_flags |= XLOG_CHKSUM_MISMATCH;
3356             }
3357         }
3358 }
3359 #else
3360 #define xlog_unpack_data_checksum(rhead, dp, log)
3361 #endif
3362
3363 STATIC void
3364 xlog_unpack_data(
3365         xlog_rec_header_t       *rhead,
3366         xfs_caddr_t             dp,
3367         xlog_t                  *log)
3368 {
3369         int                     i, j, k;
3370
3371         for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3372                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3373                 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3374                 dp += BBSIZE;
3375         }
3376
3377         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3378                 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3379                 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3380                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3381                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3382                         *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3383                         dp += BBSIZE;
3384                 }
3385         }
3386
3387         xlog_unpack_data_checksum(rhead, dp, log);
3388 }
3389
3390 STATIC int
3391 xlog_valid_rec_header(
3392         xlog_t                  *log,
3393         xlog_rec_header_t       *rhead,
3394         xfs_daddr_t             blkno)
3395 {
3396         int                     hlen;
3397
3398         if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
3399                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3400                                 XFS_ERRLEVEL_LOW, log->l_mp);
3401                 return XFS_ERROR(EFSCORRUPTED);
3402         }
3403         if (unlikely(
3404             (!rhead->h_version ||
3405             (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3406                 xlog_warn("XFS: %s: unrecognised log version (%d).",
3407                         __func__, be32_to_cpu(rhead->h_version));
3408                 return XFS_ERROR(EIO);
3409         }
3410
3411         /* LR body must have data or it wouldn't have been written */
3412         hlen = be32_to_cpu(rhead->h_len);
3413         if (unlikely(hlen <= 0 || hlen > INT_MAX)) {
3414                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3415                                 XFS_ERRLEVEL_LOW, log->l_mp);
3416                 return XFS_ERROR(EFSCORRUPTED);
3417         }
3418         if (unlikely(blkno > log->l_logBBsize || blkno > INT_MAX)) {
3419                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3420                                 XFS_ERRLEVEL_LOW, log->l_mp);
3421                 return XFS_ERROR(EFSCORRUPTED);
3422         }
3423         return 0;
3424 }
3425
3426 /*
3427  * Read the log from tail to head and process the log records found.
3428  * Handle the two cases where the tail and head are in the same cycle
3429  * and where the active portion of the log wraps around the end of
3430  * the physical log separately.  The pass parameter is passed through
3431  * to the routines called to process the data and is not looked at
3432  * here.
3433  */
3434 STATIC int
3435 xlog_do_recovery_pass(
3436         xlog_t                  *log,
3437         xfs_daddr_t             head_blk,
3438         xfs_daddr_t             tail_blk,
3439         int                     pass)
3440 {
3441         xlog_rec_header_t       *rhead;
3442         xfs_daddr_t             blk_no;
3443         xfs_caddr_t             bufaddr, offset;
3444         xfs_buf_t               *hbp, *dbp;
3445         int                     error = 0, h_size;
3446         int                     bblks, split_bblks;
3447         int                     hblks, split_hblks, wrapped_hblks;
3448         xlog_recover_t          *rhash[XLOG_RHASH_SIZE];
3449
3450         ASSERT(head_blk != tail_blk);
3451
3452         /*
3453          * Read the header of the tail block and get the iclog buffer size from
3454          * h_size.  Use this to tell how many sectors make up the log header.
3455          */
3456         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3457                 /*
3458                  * When using variable length iclogs, read first sector of
3459                  * iclog header and extract the header size from it.  Get a
3460                  * new hbp that is the correct size.
3461                  */
3462                 hbp = xlog_get_bp(log, 1);
3463                 if (!hbp)
3464                         return ENOMEM;
3465                 if ((error = xlog_bread(log, tail_blk, 1, hbp)))
3466                         goto bread_err1;
3467                 offset = xlog_align(log, tail_blk, 1, hbp);
3468                 rhead = (xlog_rec_header_t *)offset;
3469                 error = xlog_valid_rec_header(log, rhead, tail_blk);
3470                 if (error)
3471                         goto bread_err1;
3472                 h_size = be32_to_cpu(rhead->h_size);
3473                 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3474                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3475                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3476                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
3477                                 hblks++;
3478                         xlog_put_bp(hbp);
3479                         hbp = xlog_get_bp(log, hblks);
3480                 } else {
3481                         hblks = 1;
3482                 }
3483         } else {
3484                 ASSERT(log->l_sectbb_log == 0);
3485                 hblks = 1;
3486                 hbp = xlog_get_bp(log, 1);
3487                 h_size = XLOG_BIG_RECORD_BSIZE;
3488         }
3489
3490         if (!hbp)
3491                 return ENOMEM;
3492         dbp = xlog_get_bp(log, BTOBB(h_size));
3493         if (!dbp) {
3494                 xlog_put_bp(hbp);
3495                 return ENOMEM;
3496         }
3497
3498         memset(rhash, 0, sizeof(rhash));
3499         if (tail_blk <= head_blk) {
3500                 for (blk_no = tail_blk; blk_no < head_blk; ) {
3501                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3502                                 goto bread_err2;
3503                         offset = xlog_align(log, blk_no, hblks, hbp);
3504                         rhead = (xlog_rec_header_t *)offset;
3505                         error = xlog_valid_rec_header(log, rhead, blk_no);
3506                         if (error)
3507                                 goto bread_err2;
3508
3509                         /* blocks in data section */
3510                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3511                         error = xlog_bread(log, blk_no + hblks, bblks, dbp);
3512                         if (error)
3513                                 goto bread_err2;
3514                         offset = xlog_align(log, blk_no + hblks, bblks, dbp);
3515                         xlog_unpack_data(rhead, offset, log);
3516                         if ((error = xlog_recover_process_data(log,
3517                                                 rhash, rhead, offset, pass)))
3518                                 goto bread_err2;
3519                         blk_no += bblks + hblks;
3520                 }
3521         } else {
3522                 /*
3523                  * Perform recovery around the end of the physical log.
3524                  * When the head is not on the same cycle number as the tail,
3525                  * we can't do a sequential recovery as above.
3526                  */
3527                 blk_no = tail_blk;
3528                 while (blk_no < log->l_logBBsize) {
3529                         /*
3530                          * Check for header wrapping around physical end-of-log
3531                          */
3532                         offset = NULL;
3533                         split_hblks = 0;
3534                         wrapped_hblks = 0;
3535                         if (blk_no + hblks <= log->l_logBBsize) {
3536                                 /* Read header in one read */
3537                                 error = xlog_bread(log, blk_no, hblks, hbp);
3538                                 if (error)
3539                                         goto bread_err2;
3540                                 offset = xlog_align(log, blk_no, hblks, hbp);
3541                         } else {
3542                                 /* This LR is split across physical log end */
3543                                 if (blk_no != log->l_logBBsize) {
3544                                         /* some data before physical log end */
3545                                         ASSERT(blk_no <= INT_MAX);
3546                                         split_hblks = log->l_logBBsize - (int)blk_no;
3547                                         ASSERT(split_hblks > 0);
3548                                         if ((error = xlog_bread(log, blk_no,
3549                                                         split_hblks, hbp)))
3550                                                 goto bread_err2;
3551                                         offset = xlog_align(log, blk_no,
3552                                                         split_hblks, hbp);
3553                                 }
3554                                 /*
3555                                  * Note: this black magic still works with
3556                                  * large sector sizes (non-512) only because:
3557                                  * - we increased the buffer size originally
3558                                  *   by 1 sector giving us enough extra space
3559                                  *   for the second read;
3560                                  * - the log start is guaranteed to be sector
3561                                  *   aligned;
3562                                  * - we read the log end (LR header start)
3563                                  *   _first_, then the log start (LR header end)
3564                                  *   - order is important.
3565                                  */
3566                                 wrapped_hblks = hblks - split_hblks;
3567                                 bufaddr = XFS_BUF_PTR(hbp);
3568                                 error = XFS_BUF_SET_PTR(hbp,
3569                                                 bufaddr + BBTOB(split_hblks),
3570                                                 BBTOB(hblks - split_hblks));
3571                                 if (!error)
3572                                         error = xlog_bread(log, 0,
3573                                                         wrapped_hblks, hbp);
3574                                 if (!error)
3575                                         error = XFS_BUF_SET_PTR(hbp, bufaddr,
3576                                                         BBTOB(hblks));
3577                                 if (error)
3578                                         goto bread_err2;
3579                                 if (!offset)
3580                                         offset = xlog_align(log, 0,
3581                                                         wrapped_hblks, hbp);
3582                         }
3583                         rhead = (xlog_rec_header_t *)offset;
3584                         error = xlog_valid_rec_header(log, rhead,
3585                                                 split_hblks ? blk_no : 0);
3586                         if (error)
3587                                 goto bread_err2;
3588
3589                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3590                         blk_no += hblks;
3591
3592                         /* Read in data for log record */
3593                         if (blk_no + bblks <= log->l_logBBsize) {
3594                                 error = xlog_bread(log, blk_no, bblks, dbp);
3595                                 if (error)
3596                                         goto bread_err2;
3597                                 offset = xlog_align(log, blk_no, bblks, dbp);
3598                         } else {
3599                                 /* This log record is split across the
3600                                  * physical end of log */
3601                                 offset = NULL;
3602                                 split_bblks = 0;
3603                                 if (blk_no != log->l_logBBsize) {
3604                                         /* some data is before the physical
3605                                          * end of log */
3606                                         ASSERT(!wrapped_hblks);
3607                                         ASSERT(blk_no <= INT_MAX);
3608                                         split_bblks =
3609                                                 log->l_logBBsize - (int)blk_no;
3610                                         ASSERT(split_bblks > 0);
3611                                         if ((error = xlog_bread(log, blk_no,
3612                                                         split_bblks, dbp)))
3613                                                 goto bread_err2;
3614                                         offset = xlog_align(log, blk_no,
3615                                                         split_bblks, dbp);
3616                                 }
3617                                 /*
3618                                  * Note: this black magic still works with
3619                                  * large sector sizes (non-512) only because:
3620                                  * - we increased the buffer size originally
3621                                  *   by 1 sector giving us enough extra space
3622                                  *   for the second read;
3623                                  * - the log start is guaranteed to be sector
3624                                  *   aligned;
3625                                  * - we read the log end (LR header start)
3626                                  *   _first_, then the log start (LR header end)
3627                                  *   - order is important.
3628                                  */
3629                                 bufaddr = XFS_BUF_PTR(dbp);
3630                                 error = XFS_BUF_SET_PTR(dbp,
3631                                                 bufaddr + BBTOB(split_bblks),
3632                                                 BBTOB(bblks - split_bblks));
3633                                 if (!error)
3634                                         error = xlog_bread(log, wrapped_hblks,
3635                                                         bblks - split_bblks,
3636                                                         dbp);
3637                                 if (!error)
3638                                         error = XFS_BUF_SET_PTR(dbp, bufaddr,
3639                                                         h_size);
3640                                 if (error)
3641                                         goto bread_err2;
3642                                 if (!offset)
3643                                         offset = xlog_align(log, wrapped_hblks,
3644                                                 bblks - split_bblks, dbp);
3645                         }
3646                         xlog_unpack_data(rhead, offset, log);
3647                         if ((error = xlog_recover_process_data(log, rhash,
3648                                                         rhead, offset, pass)))
3649                                 goto bread_err2;
3650                         blk_no += bblks;
3651                 }
3652
3653                 ASSERT(blk_no >= log->l_logBBsize);
3654                 blk_no -= log->l_logBBsize;
3655
3656                 /* read first part of physical log */
3657                 while (blk_no < head_blk) {
3658                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3659                                 goto bread_err2;
3660                         offset = xlog_align(log, blk_no, hblks, hbp);
3661                         rhead = (xlog_rec_header_t *)offset;
3662                         error = xlog_valid_rec_header(log, rhead, blk_no);
3663                         if (error)
3664                                 goto bread_err2;
3665                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3666                         if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
3667                                 goto bread_err2;
3668                         offset = xlog_align(log, blk_no+hblks, bblks, dbp);
3669                         xlog_unpack_data(rhead, offset, log);
3670                         if ((error = xlog_recover_process_data(log, rhash,
3671                                                         rhead, offset, pass)))
3672                                 goto bread_err2;
3673                         blk_no += bblks + hblks;
3674                 }
3675         }
3676
3677  bread_err2:
3678         xlog_put_bp(dbp);
3679  bread_err1:
3680         xlog_put_bp(hbp);
3681         return error;
3682 }
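
/*
 * Worked example of the split data read above (added commentary,
 * hypothetical geometry): on a 1000-BB log with hblks = 2, a record
 * header at blocks 996-997 leaves its 10-BB body starting at 998.
 * Only split_bblks = 2 BBs fit before the physical end, so they are
 * read at 998; XFS_BUF_SET_PTR() then advances the buffer pointer by
 * BBTOB(2) and the remaining 8 BBs are read from block wrapped_hblks
 * (0 here, since the header itself did not wrap), yielding one
 * contiguous body image before the pointer is restored.
 */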
3683
3684 /*
3685  * Do the recovery of the log.  We actually do this in two phases.
3686  * The two passes are necessary in order to implement the function
3687  * of cancelling a record written into the log.  The first pass
3688  * determines those things which have been cancelled, and the
3689  * second pass replays log items normally except for those which
3690  * have been cancelled.  The handling of the replay and cancellations
3691  * takes place in the log item type specific routines.
3692  *
3693  * The table of items which have cancel records in the log is allocated
3694  * and freed at this level, since only here do we know when all of
3695  * the log recovery has been completed.
3696  */
3697 STATIC int
3698 xlog_do_log_recovery(
3699         xlog_t          *log,
3700         xfs_daddr_t     head_blk,
3701         xfs_daddr_t     tail_blk)
3702 {
3703         int             error;
3704
3705         ASSERT(head_blk != tail_blk);
3706
3707         /*
3708          * First do a pass to find all of the cancelled buf log items.
3709          * Store them in the buf_cancel_table for use in the second pass.
3710          */
3711         log->l_buf_cancel_table =
3712                 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3713                                                  sizeof(xfs_buf_cancel_t*),
3714                                                  KM_SLEEP);
3715         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3716                                       XLOG_RECOVER_PASS1);
3717         if (error != 0) {
3718                 kmem_free(log->l_buf_cancel_table);
3719                 log->l_buf_cancel_table = NULL;
3720                 return error;
3721         }
3722         /*
3723          * Then do a second pass to actually recover the items in the log.
3724          * When it is complete free the table of buf cancel items.
3725          */
3726         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3727                                       XLOG_RECOVER_PASS2);
3728 #ifdef DEBUG
3729         if (!error) {
3730                 int     i;
3731
3732                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3733                         ASSERT(log->l_buf_cancel_table[i] == NULL);
3734         }
3735 #endif  /* DEBUG */
3736
3737         kmem_free(log->l_buf_cancel_table);
3738         log->l_buf_cancel_table = NULL;
3739
3740         return error;
3741 }
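
/*
 * Note on the cancel table (added commentary): XLOG_BC_TABLE_SIZE
 * buckets of singly linked xfs_buf_cancel_t entries, hashed by block
 * number.  Pass 1 only records XFS_BLI_CANCEL buffer log items into
 * it; pass 2 replays everything else and skips any buffer that still
 * has a live entry, which is how a logged-then-cancelled buffer
 * avoids being resurrected.  The DEBUG loop above verifies that pass
 * 2 consumed every entry.
 */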
3742
3743 /*
3744  * Do the actual recovery
3745  */
3746 STATIC int
3747 xlog_do_recover(
3748         xlog_t          *log,
3749         xfs_daddr_t     head_blk,
3750         xfs_daddr_t     tail_blk)
3751 {
3752         int             error;
3753         xfs_buf_t       *bp;
3754         xfs_sb_t        *sbp;
3755
3756         /*
3757          * First replay the images in the log.
3758          */
3759         error = xlog_do_log_recovery(log, head_blk, tail_blk);
3760         if (error) {
3761                 return error;
3762         }
3763
3764         XFS_bflush(log->l_mp->m_ddev_targp);
3765
3766         /*
3767          * If IO errors happened during recovery, bail out.
3768          */
3769         if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3770                 return (EIO);
3771         }
3772
3773         /*
3774          * We now update the tail_lsn since much of the recovery has completed
3775          * and there may be space available to use.  If there were no extent
3776          * or iunlinks, we can free up the entire log and set the tail_lsn to
3777          * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3778          * lsn of the last known good LR on disk.  If there are extent frees
3779          * or iunlinks they will have some entries in the AIL; so we look at
3780          * the AIL to determine how to set the tail_lsn.
3781          */
3782         xlog_assign_tail_lsn(log->l_mp);
3783
3784         /*
3785          * Now that we've finished replaying all buffer and inode
3786          * updates, re-read in the superblock.
3787          */
3788         bp = xfs_getsb(log->l_mp, 0);
3789         XFS_BUF_UNDONE(bp);
3790         ASSERT(!(XFS_BUF_ISWRITE(bp)));
3791         ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
3792         XFS_BUF_READ(bp);
3793         XFS_BUF_UNASYNC(bp);
3794         xfsbdstrat(log->l_mp, bp);
3795         error = xfs_iowait(bp);
3796         if (error) {
3797                 xfs_ioerror_alert("xlog_do_recover",
3798                                   log->l_mp, bp, XFS_BUF_ADDR(bp));
3799                 ASSERT(0);
3800                 xfs_buf_relse(bp);
3801                 return error;
3802         }
3803
3804         /* Convert superblock from on-disk format */
3805         sbp = &log->l_mp->m_sb;
3806         xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3807         ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3808         ASSERT(xfs_sb_good_version(sbp));
3809         xfs_buf_relse(bp);
3810
3811         /* We've re-read the superblock so re-initialize per-cpu counters */
3812         xfs_icsb_reinit_counters(log->l_mp);
3813
3814         xlog_recover_check_summary(log);
3815
3816         /* Normal transactions can now occur */
3817         log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3818         return 0;
3819 }
3820
3821 /*
3822  * Perform recovery and re-initialize some log variables in xlog_find_tail.
3823  *
3824  * Return error or zero.
3825  */
3826 int
3827 xlog_recover(
3828         xlog_t          *log)
3829 {
3830         xfs_daddr_t     head_blk, tail_blk;
3831         int             error;
3832
3833         /* find the tail of the log */
3834         if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3835                 return error;
3836
3837         if (tail_blk != head_blk) {
3838                 /* There used to be a comment here:
3839                  *
3840                  * disallow recovery on read-only mounts.  note -- mount
3841                  * checks for ENOSPC and turns it into an intelligent
3842                  * error message.
3843                  * ...but this is no longer true.  Now, unless you specify
3844                  * NORECOVERY (in which case this function would never be
3845                  * called), we just go ahead and recover.  We do this all
3846                  * under the vfs layer, so we can get away with it unless
3847                  * the device itself is read-only, in which case we fail.
3848                  */
3849                 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3850                         return error;
3851                 }
3852
3853                 cmn_err(CE_NOTE,
3854                         "Starting XFS recovery on filesystem: %s (logdev: %s)",
3855                         log->l_mp->m_fsname, log->l_mp->m_logname ?
3856                         log->l_mp->m_logname : "internal");
3857
3858                 error = xlog_do_recover(log, head_blk, tail_blk);
3859                 log->l_flags |= XLOG_RECOVERY_NEEDED;
3860         }
3861         return error;
3862 }
3863
3864 /*
3865  * In the first part of recovery we replay inodes and buffers and build
3866  * up the list of extent free items which need to be processed.  Here
3867  * we process the extent free items and clean up the on disk unlinked
3868  * inode lists.  This is separated from the first part of recovery so
3869  * that the root and real-time bitmap inodes can be read in from disk in
3870  * between the two stages.  This is necessary so that we can free space
3871  * in the real-time portion of the file system.
3872  */
3873 int
3874 xlog_recover_finish(
3875         xlog_t          *log)
3876 {
3877         /*
3878          * Now we're ready to do the transactions needed for the
3879          * rest of recovery.  Start with completing all the extent
3880          * free intent records and then process the unlinked inode
3881          * lists.  At this point, we essentially run in normal mode
3882          * except that we're still performing recovery actions
3883          * rather than accepting new requests.
3884          */
3885         if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3886                 int     error;
3887                 error = xlog_recover_process_efis(log);
3888                 if (error) {
3889                         cmn_err(CE_ALERT,
3890                                 "Failed to recover EFIs on filesystem: %s",
3891                                 log->l_mp->m_fsname);
3892                         return error;
3893                 }
3894                 /*
3895                  * Sync the log to get all the EFIs out of the AIL.
3896                  * This isn't absolutely necessary, but it helps in
3897                  * case the unlink transactions have trouble
3898                  * pushing the EFIs out of the way.
3899                  */
3900                 xfs_log_force(log->l_mp, (xfs_lsn_t)0,
3901                               (XFS_LOG_FORCE | XFS_LOG_SYNC));
3902
3903                 xlog_recover_process_iunlinks(log);
3904
3905                 xlog_recover_check_summary(log);
3906
3907                 cmn_err(CE_NOTE,
3908                         "Ending XFS recovery on filesystem: %s (logdev: %s)",
3909                         log->l_mp->m_fsname, log->l_mp->m_logname ?
3910                         log->l_mp->m_logname : "internal");
3911                 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3912         } else {
3913                 cmn_err(CE_DEBUG,
3914                         "!Ending clean XFS mount for filesystem: %s\n",
3915                         log->l_mp->m_fsname);
3916         }
3917         return 0;
3918 }
3919
3920
3921 #if defined(DEBUG)
3922 /*
3923  * Read all of the agf and agi counters and check that they
3924  * are consistent with the superblock counters.
3925  */
3926 void
3927 xlog_recover_check_summary(
3928         xlog_t          *log)
3929 {
3930         xfs_mount_t     *mp;
3931         xfs_agf_t       *agfp;
3932         xfs_buf_t       *agfbp;
3933         xfs_buf_t       *agibp;
3934         xfs_buf_t       *sbbp;
3935 #ifdef XFS_LOUD_RECOVERY
3936         xfs_sb_t        *sbp;
3937 #endif
3938         xfs_agnumber_t  agno;
3939         __uint64_t      freeblks;
3940         __uint64_t      itotal;
3941         __uint64_t      ifree;
3942         int             error;
3943
3944         mp = log->l_mp;
3945
3946         freeblks = 0LL;
3947         itotal = 0LL;
3948         ifree = 0LL;
3949         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3950                 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3951                 if (error) {
3952                         xfs_fs_cmn_err(CE_ALERT, mp,
3953                                         "xlog_recover_check_summary(agf): "
3954                                         "agf read failed agno %d error %d",
3955                                                         agno, error);
3956                 } else {
3957                         agfp = XFS_BUF_TO_AGF(agfbp);
3958                         freeblks += be32_to_cpu(agfp->agf_freeblks) +
3959                                     be32_to_cpu(agfp->agf_flcount);
3960                         xfs_buf_relse(agfbp);
3961                 }
3962
3963                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3964                 if (!error) {
3965                         struct xfs_agi  *agi = XFS_BUF_TO_AGI(agibp);
3966
3967                         itotal += be32_to_cpu(agi->agi_count);
3968                         ifree += be32_to_cpu(agi->agi_freecount);
3969                         xfs_buf_relse(agibp);
3970                 }
3971         }
3972
3973         sbbp = xfs_getsb(mp, 0);
3974 #ifdef XFS_LOUD_RECOVERY
3975         sbp = &mp->m_sb;
3976         xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(sbbp));
3977         cmn_err(CE_NOTE,
3978                 "xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
3979                 sbp->sb_icount, itotal);
3980         cmn_err(CE_NOTE,
3981                 "xlog_recover_check_summary: sb_ifree %Lu ifree %Lu",
3982                 sbp->sb_ifree, ifree);
3983         cmn_err(CE_NOTE,
3984                 "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
3985                 sbp->sb_fdblocks, freeblks);
3986 #if 0
3987         /*
3988          * This is turned off until I account for the allocation
3989          * btree blocks which live in free space.
3990          */
3991         ASSERT(sbp->sb_icount == itotal);
3992         ASSERT(sbp->sb_ifree == ifree);
3993         ASSERT(sbp->sb_fdblocks == freeblks);
3994 #endif
3995 #endif
3996         xfs_buf_relse(sbbp);
3997 }
3998 #endif /* DEBUG */