/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_sb.h"
#include "xfs_trans.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_imap.h"
#include "xfs_inode_item.h"
#include "xfs_inode.h"
#include "xfs_ialloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_alloc_btree.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_bit.h"
#include "xfs_quota.h"
#include "xfs_rw.h"

STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
STATIC void     xlog_recover_insert_item_backq(xlog_recover_item_t **q,
                                               xlog_recover_item_t *item);
#if defined(DEBUG)
STATIC void     xlog_recover_check_summary(xlog_t *);
STATIC void     xlog_recover_check_ail(xfs_mount_t *, xfs_log_item_t *, int);
#else
#define xlog_recover_check_summary(log)
#define xlog_recover_check_ail(mp, lip, gen)
#endif


/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs)   \
        ( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
        ((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno)   ((bno) & ~(log)->l_sectbb_mask)
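
/*
 * Illustrative note (not original commentary): with 512-byte basic blocks
 * on a log device using 4KB sectors, l_sectbb_mask is 7, so
 * XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 10) is (10 + 7 + 1) & ~7 = 16 and
 * XLOG_SECTOR_ROUNDDOWN_BLKNO(log, 13) is 13 & ~7 = 8; every read and
 * write of the log device then covers whole sectors.
 */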

xfs_buf_t *
xlog_get_bp(
        xlog_t          *log,
        int             num_bblks)
{
        ASSERT(num_bblks > 0);

        if (log->l_sectbb_log) {
                if (num_bblks > 1)
                        num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
                num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
        }
        return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
}

void
xlog_put_bp(
        xfs_buf_t       *bp)
{
        xfs_buf_free(bp);
}


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
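/*
 * Illustrative note (not original commentary): on a device with
 * 8-basic-block sectors, a request for 2 blocks at block 13 is widened
 * below to a sector-aligned read of blocks 8..15; callers then use
 * xlog_align() to locate their data inside the larger buffer.
 */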
int
xlog_bread(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
        ASSERT(bp);

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_READ(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        xfsbdstrat(log->l_mp, bp);
        if ((error = xfs_iowait(bp)))
                xfs_ioerror_alert("xlog_bread", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (log->l_sectbb_log) {
                blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
                nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
        }

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_HOLD(bp);
        XFS_BUF_PSEMA(bp, PRIBIO);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        if ((error = xfs_bwrite(log->l_mp, bp)))
                xfs_ioerror_alert("xlog_bwrite", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

STATIC xfs_caddr_t
xlog_align(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        xfs_caddr_t     ptr;

        if (!log->l_sectbb_log)
                return XFS_BUF_PTR(bp);

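        /*
         * Example (illustrative, not original commentary): with
         * l_sectbb_mask == 7, a caller asking for blk_no 13 actually read
         * sector-aligned block 8, so its data starts BBTOB(13 & 7), i.e.
         * five basic blocks, into the buffer.
         */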
        ptr = XFS_BUF_PTR(bp) + BBTOB((int)blk_no & log->l_sectbb_mask);
        ASSERT(XFS_BUF_SIZE(bp) >=
                BBTOB(nbblks + (blk_no & log->l_sectbb_mask)));
        return ptr;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        int                     b;

        printk("%s:  SB : uuid = ", __FUNCTION__);
        for (b = 0; b < 16; b++)
                printk("%02x",((unsigned char *)&mp->m_sb.sb_uuid)[b]);
        printk(", fmt = %d\n", XLOG_FMT);
        printk("    log : uuid = ");
        for (b = 0; b < 16; b++)
                printk("%02x",((unsigned char *)&head->h_fs_uuid)[b]);
        printk(", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) {
                xlog_warn(
        "XFS: dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn(
        "XFS: dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM);

        if (uuid_is_nil(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is nil, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xlog_warn("XFS: nil uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn("XFS: log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        xfs_mount_t     *mp;

        ASSERT(XFS_BUF_FSPRIVATE(bp, void *));

        if (XFS_BUF_GETERROR(bp)) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery. One strike!
                 */
                mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *);
                xfs_ioerror_alert("xlog_recover_iodone",
                                  mp, bp, XFS_BUF_ADDR(bp));
                xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
        }
        XFS_BUF_SET_FSPRIVATE(bp, NULL);
        XFS_BUF_CLR_IODONE_FUNC(bp);
        xfs_biodone(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
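/*
 * Illustrative sketch (not original commentary): for a log stamped
 * 9 9 9 9 8 8 and cycle == 8, first_blk starts on a 9 and *last_blk on
 * an 8; each probe moves the endpoint that matches the probed block's
 * cycle, so the loop converges on the 9|8 boundary and *last_blk ends up
 * naming (approximately) the first block of cycle 8.
 */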
int
xlog_find_cycle_start(
        xlog_t          *log,
        xfs_buf_t       *bp,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        xfs_caddr_t     offset;
        xfs_daddr_t     mid_blk;
        uint            mid_cycle;
        int             error;

        mid_blk = BLK_AVG(first_blk, *last_blk);
        while (mid_blk != first_blk && mid_blk != *last_blk) {
                if ((error = xlog_bread(log, mid_blk, 1, bp)))
                        return error;
                offset = xlog_align(log, mid_blk, 1, bp);
                mid_cycle = GET_CYCLE(offset, ARCH_CONVERT);
                if (mid_cycle == cycle) {
                        *last_blk = mid_blk;
                        /* last_half_cycle == mid_cycle */
                } else {
                        first_blk = mid_blk;
                        /* first_half_cycle == mid_cycle */
                }
                mid_blk = BLK_AVG(first_blk, *last_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == *last_blk) ||
               (mid_blk == *last_blk && mid_blk-1 == first_blk));

        return 0;
}

/*
 * Check that the range of blocks does not contain the cycle number
 * given.  The scan needs to occur from front to back and the ptr into the
 * region must be updated since a later routine will need to perform another
 * test.  If the region is completely good, we end up returning the same
 * last block number.
 *
 * Set blkno to -1 if we encounter no errors.  This is an invalid block number
 * since we don't ever expect logs to get this large.
 */
STATIC int
xlog_find_verify_cycle(
        xlog_t          *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        xfs_buf_t       *bp;
        xfs_daddr_t     bufblks;
        xfs_caddr_t     buf = NULL;
        int             error = 0;

        bufblks = 1 << ffs(nbblks);

        while (!(bp = xlog_get_bp(log, bufblks))) {
                /* can't get enough memory to do everything in one big buffer */
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                if ((error = xlog_bread(log, i, bcount, bp)))
                        goto out;

                buf = xlog_align(log, i, bcount, bp);
                for (j = 0; j < bcount; j++) {
                        cycle = GET_CYCLE(buf, ARCH_CONVERT);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
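/*
 * Illustrative example (not original commentary): if *last_blk is 100 and
 * the backwards scan below finds a record header at block 96, a complete
 * v1 record would satisfy 100 - 96 + extra_bblks == BTOBB(h_len) + 1;
 * any other total means block 100 sits inside a torn write, so *last_blk
 * is pulled back to 96.
 */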
STATIC int
xlog_find_verify_log_record(
        xlog_t                  *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        xfs_buf_t               *bp;
        xfs_caddr_t             offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        if (!(bp = xlog_get_bp(log, num_blks))) {
                if (!(bp = xlog_get_bp(log, 1)))
                        return ENOMEM;
                smallmem = 1;
        } else {
                if ((error = xlog_bread(log, start_blk, num_blks, bp)))
                        goto out;
                offset = xlog_align(log, start_blk, num_blks, bp);
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xlog_warn(
                "XFS: Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = XFS_ERROR(EIO);
                        goto out;
                }

                if (smallmem) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto out;
                        offset = xlog_align(log, i, 1, bp);
                }

                head = (xlog_rec_header_t *)offset;

                if (XLOG_HEADER_MAGIC_NUM ==
                    INT_GET(head->h_magicno, ARCH_CONVERT))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = -1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
                uint    h_size = INT_GET(head->h_size, ARCH_CONVERT);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks
                        != BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs)
                *last_blk = i;

out:
        xlog_put_bp(bp);
        return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        xlog_t          *log,
        xfs_daddr_t     *return_head_blk)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xlog_warn("XFS: totally zeroed log");
                }

                return 0;
        } else if (error) {
                xlog_warn("XFS: empty log check failed");
                return error;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        if ((error = xlog_bread(log, last_blk, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, last_blk, 1, bp);
        last_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ...
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * or
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 */
                stop_on_cycle = last_half_cycle;
                if ((error = xlog_find_cycle_start(log, bp, first_blk,
                                                &head_blk, last_half_cycle)))
                        goto bp_err;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                start_blk = log_bbnum - num_scan_bblks + head_blk;
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks - head_blk >= 0);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto bp_err;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto bad_blk;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

 bad_blk:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        error = XFS_ERROR(EIO);
                        goto bp_err;
                } else if (error)
                        goto bp_err;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - num_scan_bblks + head_blk;
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        if ((error = xlog_find_verify_log_record(log,
                                                        start_blk, &new_blk,
                                                        (int)head_blk)) == -1) {
                                error = XFS_ERROR(EIO);
                                goto bp_err;
                        } else if (error)
                                goto bp_err;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto bp_err;
        }

        xlog_put_bp(bp);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

 bp_err:
        xlog_put_bp(bp);

        if (error)
            xlog_warn("XFS: failed to find log head");
        return error;
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
int
xlog_find_tail(
        xlog_t                  *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk,
        int                     readonly)
{
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
        xfs_caddr_t             offset = NULL;
        xfs_buf_t               *bp;
        int                     error, i, found;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        xfs_lsn_t               tail_lsn;
        int                     hblks;

        found = 0;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;

        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if (*head_blk == 0) {                           /* special case */
                if ((error = xlog_bread(log, 0, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, 0, 1, bp);
                if (GET_CYCLE(offset, ARCH_CONVERT) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto exit;
                }
        }

        /*
         * Search backwards looking for log record header block
         */
        ASSERT(*head_blk < INT_MAX);
        for (i = (int)(*head_blk) - 1; i >= 0; i--) {
                if ((error = xlog_bread(log, i, 1, bp)))
                        goto bread_err;
                offset = xlog_align(log, i, 1, bp);
                if (XLOG_HEADER_MAGIC_NUM ==
                    INT_GET(*(uint *)offset, ARCH_CONVERT)) {
                        found = 1;
                        break;
                }
        }
        /*
         * If we haven't found the log record header block, start looking
         * again from the end of the physical log.  XXXmiken: There should be
         * a check here to make sure we didn't search more than N blocks in
         * the previous code.
         */
        if (!found) {
                for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
                        if ((error = xlog_bread(log, i, 1, bp)))
                                goto bread_err;
                        offset = xlog_align(log, i, 1, bp);
                        if (XLOG_HEADER_MAGIC_NUM ==
                            INT_GET(*(uint*)offset, ARCH_CONVERT)) {
                                found = 2;
                                break;
                        }
                }
        }
        if (!found) {
                xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
                ASSERT(0);
                return XFS_ERROR(EIO);
        }

        /* find blk_no of tail of log */
        rhead = (xlog_rec_header_t *)offset;
        *tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT));

        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = i;
        log->l_curr_block = (int)*head_blk;
        log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT);
        if (found == 2)
                log->l_curr_cycle++;
        log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT);
        log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT);
        log->l_grant_reserve_cycle = log->l_curr_cycle;
        log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
        log->l_grant_write_cycle = log->l_curr_cycle;
        log->l_grant_write_bytes = BBTOB(log->l_curr_block);

        /*
         * Look for unmount record.  If we find it, then we know there
         * was a clean unmount.  Since 'i' could be the last block in
         * the physical log, we convert to a log block before comparing
         * to the head_blk.
         *
         * Save the current tail lsn to use to pass to
         * xlog_clear_stale_blocks() below.  We won't want to clear the
         * unmount record if there is one, so we pass the lsn of the
         * unmount record rather than the block after it.
         */
        if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
                int     h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
                int     h_version = INT_GET(rhead->h_version, ARCH_CONVERT);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }
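        /*
         * Illustrative note (assumption): the record found at block i spans
         * hblks header blocks plus BTOBB(h_len) data blocks, so the block
         * just past it is i + hblks + BTOBB(h_len), taken modulo the
         * physical log size in case the record wrapped around the end.
         */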
        after_umount_blk = (i + hblks + (int)
                BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize;
        tail_lsn = log->l_tail_lsn;
        if (*head_blk == after_umount_blk &&
            INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) {
                umount_data_blk = (i + hblks) % log->l_logBBsize;
                if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
                        goto bread_err;
                }
                offset = xlog_align(log, umount_data_blk, 1, bp);
                op_head = (xlog_op_header_t *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written
                         * log records will point recovery to after the
                         * current unmount record.
                         */
                        ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, log->l_curr_cycle,
                                        after_umount_blk);
                        ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle,
                                        after_umount_blk);
                        *tail_blk = after_umount_blk;
                }
        }

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
                error = xlog_clear_stale_blocks(log, tail_lsn);
        }

bread_err:
exit:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to locate log tail");
        return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *      0  => the log is completely written to
 *      -1 => use *blk_no as the first block of the log
 *      >0 => error has occurred
 */
int
xlog_find_zeroed(
        xlog_t          *log,
        xfs_daddr_t     *blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        /* check totally zeroed log */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if ((error = xlog_bread(log, 0, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, 0, 1, bp);
        first_cycle = GET_CYCLE(offset, ARCH_CONVERT);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                xlog_put_bp(bp);
                return -1;
        }

        /* check partially zeroed log */
        if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
                goto bp_err;
        offset = xlog_align(log, log_bbnum-1, 1, bp);
        last_cycle = GET_CYCLE(offset, ARCH_CONVERT);
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
        } else if (first_cycle != 1) {
                /*
                 * If the cycle of the last block is zero, the cycle of
                 * the first block must be 1. If it's not, maybe we're
                 * not looking at a log... Bail out.
                 */
                xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
                return XFS_ERROR(EINVAL);
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
                goto bp_err;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the maximum
         * is not chosen to mean anything special.   XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                         (int)num_scan_bblks, 0, &new_blk)))
                goto bp_err;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially backup over partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        if ((error = xlog_find_verify_log_record(log, start_blk,
                                &last_blk, 0)) == -1) {
            error = XFS_ERROR(EIO);
            goto bp_err;
        } else if (error)
            goto bp_err;

        *blk_no = last_blk;
bp_err:
        xlog_put_bp(bp);
        if (error)
                return error;
        return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
        xlog_t                  *log,
        xfs_caddr_t             buf,
        int                     cycle,
        int                     block,
        int                     tail_cycle,
        int                     tail_block)
{
        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;

        memset(buf, 0, BBSIZE);
        INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
        INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
        INT_SET(recp->h_version, ARCH_CONVERT,
                        XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
        ASSIGN_ANY_LSN_DISK(recp->h_lsn, cycle, block);
        ASSIGN_ANY_LSN_DISK(recp->h_tail_lsn, tail_cycle, tail_block);
        INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
        xlog_t          *log,
        int             cycle,
        int             start_block,
        int             blocks,
        int             tail_cycle,
        int             tail_block)
{
        xfs_caddr_t     offset;
        xfs_buf_t       *bp;
        int             balign, ealign;
        int             sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
        int             end_block = start_block + blocks;
        int             bufblks;
        int             error = 0;
        int             i, j = 0;

        bufblks = 1 << ffs(blocks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks <= log->l_sectbb_log)
                        return ENOMEM;
        }

        /* We may need to do a read at the start to fill in part of
         * the buffer in the starting sector not covered by the first
         * write below.
         */
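        /*
         * Illustrative note (assumption): with 8-basic-block sectors and
         * start_block == 13, balign is rounded down to 8 and j becomes 5,
         * recording how far into its sector the first new record will land
         * so the preceding blocks survive the sector-sized write.
         */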
        balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
        if (balign != start_block) {
                if ((error = xlog_bread(log, start_block, 1, bp))) {
                        xlog_put_bp(bp);
                        return error;
                }
                j = start_block - balign;
        }

        for (i = start_block; i < end_block; i += bufblks) {
                int             bcount, endcount;

                bcount = min(bufblks, end_block - start_block);
                endcount = bcount - j;

                /* We may need to do a read at the end to fill in part of
                 * the buffer in the final sector not covered by the write.
                 * If this is the same sector as the above read, skip it.
                 */
                ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
                if (j == 0 && (start_block + endcount > ealign)) {
                        offset = XFS_BUF_PTR(bp);
                        balign = BBTOB(ealign - start_block);
                        XFS_BUF_SET_PTR(bp, offset + balign, BBTOB(sectbb));
                        if ((error = xlog_bread(log, ealign, sectbb, bp)))
                                break;
                        XFS_BUF_SET_PTR(bp, offset, bufblks);
                }

                offset = xlog_align(log, start_block, endcount, bp);
                for (; j < endcount; j++) {
                        xlog_add_record(log, offset, cycle, i+j,
                                        tail_cycle, tail_block);
                        offset += BBSIZE;
                }
                error = xlog_bwrite(log, start_block, endcount, bp);
                if (error)
                        break;
                start_block += endcount;
                j = 0;
        }
        xlog_put_bp(bp);
        return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
        xlog_t          *log,
        xfs_lsn_t       tail_lsn)
{
        int             tail_cycle, head_cycle;
        int             tail_block, head_block;
        int             tail_distance, max_distance;
        int             distance;
        int             error;

        tail_cycle = CYCLE_LSN(tail_lsn);
        tail_block = BLOCK_LSN(tail_lsn);
        head_cycle = log->l_curr_cycle;
        head_block = log->l_curr_block;

        /*
         * Figure out the distance between the new head of the log
         * and the tail.  We want to write over any blocks beyond the
         * head that we may have written just before the crash, but
         * we don't want to overwrite the tail of the log.
         */
        if (head_cycle == tail_cycle) {
                /*
                 * The tail is behind the head in the physical log,
                 * so the distance from the head to the tail is the
                 * distance from the head to the end of the log plus
                 * the distance from the beginning of the log to the
                 * tail.
                 */
                if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block + (log->l_logBBsize - head_block);
        } else {
                /*
                 * The head is behind the tail in the physical log,
                 * so the distance from the head to the tail is just
                 * the tail block minus the head block.
                 */
                if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block - head_block;
        }
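        /*
         * Worked example (illustrative): in a 1000-block log with the head
         * at block 900, a tail at block 100 on the same cycle gives
         * tail_distance = 100 + (1000 - 900) = 200, while a tail ahead at
         * block 950, one cycle behind the head, gives 950 - 900 = 50.
         */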
1258
1259         /*
1260          * If the head is right up against the tail, we can't clear
1261          * anything.
1262          */
1263         if (tail_distance <= 0) {
1264                 ASSERT(tail_distance == 0);
1265                 return 0;
1266         }
1267
1268         max_distance = XLOG_TOTAL_REC_SHIFT(log);
1269         /*
1270          * Take the smaller of the maximum amount of outstanding I/O
1271          * we could have and the distance to the tail to clear out.
1272          * We take the smaller so that we don't overwrite the tail and
1273          * we don't waste all day writing from the head to the tail
1274          * for no reason.
1275          */
1276         max_distance = MIN(max_distance, tail_distance);
1277
1278         if ((head_block + max_distance) <= log->l_logBBsize) {
1279                 /*
1280                  * We can stomp all the blocks we need to without
1281                  * wrapping around the end of the log.  Just do it
1282                  * in a single write.  Use the cycle number of the
1283                  * current cycle minus one so that the log will look like:
1284                  *     n ... | n - 1 ...
1285                  */
1286                 error = xlog_write_log_records(log, (head_cycle - 1),
1287                                 head_block, max_distance, tail_cycle,
1288                                 tail_block);
1289                 if (error)
1290                         return error;
1291         } else {
1292                 /*
1293                  * We need to wrap around the end of the physical log in
1294                  * order to clear all the blocks.  Do it in two separate
1295                  * I/Os.  The first write should be from the head to the
1296                  * end of the physical log, and it should use the current
1297                  * cycle number minus one just like above.
1298                  */
1299                 distance = log->l_logBBsize - head_block;
1300                 error = xlog_write_log_records(log, (head_cycle - 1),
1301                                 head_block, distance, tail_cycle,
1302                                 tail_block);
1303
1304                 if (error)
1305                         return error;
1306
1307                 /*
1308                  * Now write the blocks at the start of the physical log.
1309                  * This writes the remainder of the blocks we want to clear.
1310                  * It uses the current cycle number since we're now on the
1311                  * same cycle as the head so that we get:
1312                  *    n ... n ... | n - 1 ...
1313                  *    ^^^^^ blocks we're writing
1314                  */
1315                 distance = max_distance - (log->l_logBBsize - head_block);
1316                 error = xlog_write_log_records(log, head_cycle, 0, distance,
1317                                 tail_cycle, tail_block);
1318                 if (error)
1319                         return error;
1320         }
1321
1322         return 0;
1323 }
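
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * wrap-around split performed above, reduced to plain arithmetic.  All
 * the numbers below are made up for the example.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int log_bbsize = 1000;		/* physical log size in basic blocks */
	int head_block = 900;		/* first block to stomp */
	int head_cycle = 7;		/* current cycle number "n" */
	int max_distance = 300;		/* blocks we decided to clear */

	if (head_block + max_distance <= log_bbsize) {
		/* no wrap: one write of max_distance blocks at cycle n-1 */
		printf("write %d blks at %d, cycle %d\n",
			max_distance, head_block, head_cycle - 1);
	} else {
		/* wrap: head..end at cycle n-1, then the rest at cycle n */
		int first = log_bbsize - head_block;	/* 100 blocks */
		int second = max_distance - first;	/* 200 blocks */

		printf("write %d blks at %d, cycle %d\n",
			first, head_block, head_cycle - 1);
		printf("write %d blks at 0, cycle %d\n",
			second, head_cycle);
	}
	return 0;
}
#endif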
1324
1325 /******************************************************************************
1326  *
1327  *              Log recover routines
1328  *
1329  ******************************************************************************
1330  */
1331
1332 STATIC xlog_recover_t *
1333 xlog_recover_find_tid(
1334         xlog_recover_t          *q,
1335         xlog_tid_t              tid)
1336 {
1337         xlog_recover_t          *p = q;
1338
1339         while (p != NULL) {
1340                 if (p->r_log_tid == tid)
1341                         break;
1342                 p = p->r_next;
1343         }
1344         return p;
1345 }
1346
1347 STATIC void
1348 xlog_recover_put_hashq(
1349         xlog_recover_t          **q,
1350         xlog_recover_t          *trans)
1351 {
1352         trans->r_next = *q;
1353         *q = trans;
1354 }
1355
1356 STATIC void
1357 xlog_recover_add_item(
1358         xlog_recover_item_t     **itemq)
1359 {
1360         xlog_recover_item_t     *item;
1361
1362         item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1363         xlog_recover_insert_item_backq(itemq, item);
1364 }
1365
1366 STATIC int
1367 xlog_recover_add_to_cont_trans(
1368         xlog_recover_t          *trans,
1369         xfs_caddr_t             dp,
1370         int                     len)
1371 {
1372         xlog_recover_item_t     *item;
1373         xfs_caddr_t             ptr, old_ptr;
1374         int                     old_len;
1375
1376         item = trans->r_itemq;
1377         if (item == NULL) {
1378                 /* finish copying the rest of the trans header */
1379                 xlog_recover_add_item(&trans->r_itemq);
1380                 ptr = (xfs_caddr_t) &trans->r_theader +
1381                                 sizeof(xfs_trans_header_t) - len;
1382                 memcpy(ptr, dp, len); /* dest, src, len */
1383                 return 0;
1384         }
1385         item = item->ri_prev;
1386
1387         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1388         old_len = item->ri_buf[item->ri_cnt-1].i_len;
1389
1390         ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0);
1391         memcpy(&ptr[old_len], dp, len); /* dest, src, len */
1392         item->ri_buf[item->ri_cnt-1].i_len += len;
1393         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1394         return 0;
1395 }
1396
1397 /*
1398  * The next region to add is the start of a new region.  It could be
1399  * a whole region or it could be the first part of a new region.  Because
1400  * of this, the assumption here is that the type and size fields of all
1401  * format structures fit into the first 32 bits of the structure.
1402  *
1403  * This works because all regions must be 32 bit aligned.  Therefore, we
1404  * either have both fields or we have neither field.  In the case we have
1405  * neither field, the data part of the region is zero length.  We only have
1406  * a log_op_header and can throw away the header since a new one will appear
1407  * later.  If we have at least 4 bytes, then we can determine how many regions
1408  * will appear in the current log item.
1409  */
1410 STATIC int
1411 xlog_recover_add_to_trans(
1412         xlog_recover_t          *trans,
1413         xfs_caddr_t             dp,
1414         int                     len)
1415 {
1416         xfs_inode_log_format_t  *in_f;                  /* any will do */
1417         xlog_recover_item_t     *item;
1418         xfs_caddr_t             ptr;
1419
1420         if (!len)
1421                 return 0;
1422         item = trans->r_itemq;
1423         if (item == NULL) {
1424                 ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC);
1425                 if (len == sizeof(xfs_trans_header_t))
1426                         xlog_recover_add_item(&trans->r_itemq);
1427                 memcpy(&trans->r_theader, dp, len); /* dest, src, len */
1428                 return 0;
1429         }
1430
1431         ptr = kmem_alloc(len, KM_SLEEP);
1432         memcpy(ptr, dp, len);
1433         in_f = (xfs_inode_log_format_t *)ptr;
1434
1435         if (item->ri_prev->ri_total != 0 &&
1436              item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
1437                 xlog_recover_add_item(&trans->r_itemq);
1438         }
1439         item = trans->r_itemq;
1440         item = item->ri_prev;
1441
1442         if (item->ri_total == 0) {              /* first region to be added */
1443                 item->ri_total  = in_f->ilf_size;
1444                 ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
1445                 item->ri_buf = kmem_zalloc((item->ri_total *
1446                                             sizeof(xfs_log_iovec_t)), KM_SLEEP);
1447         }
1448         ASSERT(item->ri_total > item->ri_cnt);
1449         /* Description region is ri_buf[0] */
1450         item->ri_buf[item->ri_cnt].i_addr = ptr;
1451         item->ri_buf[item->ri_cnt].i_len  = len;
1452         item->ri_cnt++;
1453         return 0;
1454 }
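
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * alignment assumption documented above is that every log item format
 * begins with a 16-bit type and a 16-bit size, so the first four bytes
 * of a region are always enough to size the item.  A simplified,
 * self-contained model with a made-up type value:
 */
#if 0
#include <stdio.h>
#include <string.h>

struct fake_log_format {		/* stand-in for xfs_*_log_format_t */
	unsigned short	type;		/* log item type (example value) */
	unsigned short	size;		/* number of regions in this item */
	/* ... the rest of the format structure follows ... */
};

int
main(void)
{
	unsigned char region[64];
	struct fake_log_format f = { 0x123b, 3 };	/* example values */
	struct fake_log_format peek;

	memcpy(region, &f, sizeof(f));
	/* the first 4 bytes are valid as long as the region is 32-bit aligned */
	memcpy(&peek, region, sizeof(peek));
	printf("type 0x%x, %u regions in this item\n", peek.type, peek.size);
	return 0;
}
#endif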
1455
1456 STATIC void
1457 xlog_recover_new_tid(
1458         xlog_recover_t          **q,
1459         xlog_tid_t              tid,
1460         xfs_lsn_t               lsn)
1461 {
1462         xlog_recover_t          *trans;
1463
1464         trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1465         trans->r_log_tid   = tid;
1466         trans->r_lsn       = lsn;
1467         xlog_recover_put_hashq(q, trans);
1468 }
1469
1470 STATIC int
1471 xlog_recover_unlink_tid(
1472         xlog_recover_t          **q,
1473         xlog_recover_t          *trans)
1474 {
1475         xlog_recover_t          *tp;
1476         int                     found = 0;
1477
1478         ASSERT(trans != NULL);
1479         if (trans == *q) {
1480                 *q = (*q)->r_next;
1481         } else {
1482                 tp = *q;
1483                 while (tp != NULL) {
1484                         if (tp->r_next == trans) {
1485                                 found = 1;
1486                                 break;
1487                         }
1488                         tp = tp->r_next;
1489                 }
1490                 if (!found) {
1491                         xlog_warn(
1492                              "XFS: xlog_recover_unlink_tid: trans not found");
1493                         ASSERT(0);
1494                         return XFS_ERROR(EIO);
1495                 }
1496                 tp->r_next = tp->r_next->r_next;
1497         }
1498         return 0;
1499 }
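
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * three routines above maintain a simple singly linked queue of open
 * transactions keyed by tid.  A caller pattern with simplified types:
 */
#if 0
#include <stdlib.h>

struct rec {				/* stand-in for xlog_recover_t */
	unsigned int	tid;
	struct rec	*next;
};

static struct rec *
find_tid(struct rec *q, unsigned int tid)
{
	while (q && q->tid != tid)
		q = q->next;
	return q;
}

int
main(void)
{
	struct rec *q = NULL;
	struct rec *t = calloc(1, sizeof(*t));	/* "new tid": push at head */

	if (!t)
		return 1;
	t->tid = 42;
	t->next = q;
	q = t;
	/* per log operation: look the transaction up again by tid */
	if (find_tid(q, 42) != t)
		return 1;
	/* on commit: unlink from the queue, then the record can be freed */
	q = q->next;
	free(t);
	return 0;
}
#endif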
1500
1501 STATIC void
1502 xlog_recover_insert_item_backq(
1503         xlog_recover_item_t     **q,
1504         xlog_recover_item_t     *item)
1505 {
1506         if (*q == 0) {
1507                 item->ri_prev = item->ri_next = item;
1508                 *q = item;
1509         } else {
1510                 item->ri_next           = *q;
1511                 item->ri_prev           = (*q)->ri_prev;
1512                 (*q)->ri_prev           = item;
1513                 item->ri_prev->ri_next  = item;
1514         }
1515 }
1516
1517 STATIC void
1518 xlog_recover_insert_item_frontq(
1519         xlog_recover_item_t     **q,
1520         xlog_recover_item_t     *item)
1521 {
1522         xlog_recover_insert_item_backq(q, item);
1523         *q = item;
1524 }
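
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * item queue is circular and doubly linked, with *q pointing at the head.
 * insert_item_backq() links a new item just before the head, which in a
 * circular list means "at the tail"; insert_item_frontq() does the same
 * and then moves the head pointer, so the new item becomes the head:
 *
 *	start:      A -> B -> C -> (back to A),   *q == A
 *	backq(X):   A -> B -> C -> X -> (A),      *q == A
 *	frontq(Y):  Y -> A -> B -> C -> X -> (Y), *q == Y
 *
 * A minimal check of the backq ordering, with a simplified item type:
 */
#if 0
#include <assert.h>

struct it {
	struct it	*next;
	struct it	*prev;
};

static void
backq(struct it **q, struct it *i)
{
	if (*q == NULL) {
		i->next = i->prev = i;
		*q = i;
	} else {
		i->next = *q;
		i->prev = (*q)->prev;
		(*q)->prev = i;
		i->prev->next = i;
	}
}

int
main(void)
{
	struct it a, b, c;
	struct it *q = NULL;

	backq(&q, &a);
	backq(&q, &b);
	backq(&q, &c);
	/* order is a -> b -> c -> (a); a is head, c is tail */
	assert(q == &a && a.next == &b && b.next == &c && c.next == &a);
	/* frontq(&q, &d) would do the same insert and then set q = &d */
	return 0;
}
#endif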
1525
1526 STATIC int
1527 xlog_recover_reorder_trans(
1528         xlog_t                  *log,
1529         xlog_recover_t          *trans)
1530 {
1531         xlog_recover_item_t     *first_item, *itemq, *itemq_next;
1532         xfs_buf_log_format_t    *buf_f;
1533         xfs_buf_log_format_v1_t *obuf_f;
1534         ushort                  flags = 0;
1535
1536         first_item = itemq = trans->r_itemq;
1537         trans->r_itemq = NULL;
1538         do {
1539                 itemq_next = itemq->ri_next;
1540                 buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
1541                 switch (ITEM_TYPE(itemq)) {
1542                 case XFS_LI_BUF:
1543                         flags = buf_f->blf_flags;
1544                         break;
1545                 case XFS_LI_6_1_BUF:
1546                 case XFS_LI_5_3_BUF:
1547                         obuf_f = (xfs_buf_log_format_v1_t *)buf_f;
1548                         flags = obuf_f->blf_flags;
1549                         break;
1550                 }
1551
1552                 switch (ITEM_TYPE(itemq)) {
1553                 case XFS_LI_BUF:
1554                 case XFS_LI_6_1_BUF:
1555                 case XFS_LI_5_3_BUF:
1556                         if (!(flags & XFS_BLI_CANCEL)) {
1557                                 xlog_recover_insert_item_frontq(&trans->r_itemq,
1558                                                                 itemq);
1559                                 break;
1560                         }
1561                 case XFS_LI_INODE:
1562                 case XFS_LI_6_1_INODE:
1563                 case XFS_LI_5_3_INODE:
1564                 case XFS_LI_DQUOT:
1565                 case XFS_LI_QUOTAOFF:
1566                 case XFS_LI_EFD:
1567                 case XFS_LI_EFI:
1568                         xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
1569                         break;
1570                 default:
1571                         xlog_warn(
1572         "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
1573                         ASSERT(0);
1574                         return XFS_ERROR(EIO);
1575                 }
1576                 itemq = itemq_next;
1577         } while (first_item != itemq);
1578         return 0;
1579 }
1580
1581 /*
1582  * Build up the table of buf cancel records so that we don't replay
1583  * cancelled data in the second pass.  For buffer records that are
1584  * not cancel records, there is nothing to do here so we just return.
1585  *
1586  * If we get a cancel record which is already in the table, this indicates
1587  * that the buffer was cancelled multiple times.  In order to ensure
1588  * that during pass 2 we keep the record in the table until we reach its
1589  * last occurrence in the log, we keep a reference count in the cancel
1590  * record in the table to tell us how many times we expect to see this
1591  * record during the second pass.
1592  */
1593 STATIC void
1594 xlog_recover_do_buffer_pass1(
1595         xlog_t                  *log,
1596         xfs_buf_log_format_t    *buf_f)
1597 {
1598         xfs_buf_cancel_t        *bcp;
1599         xfs_buf_cancel_t        *nextp;
1600         xfs_buf_cancel_t        *prevp;
1601         xfs_buf_cancel_t        **bucket;
1602         xfs_buf_log_format_v1_t *obuf_f;
1603         xfs_daddr_t             blkno = 0;
1604         uint                    len = 0;
1605         ushort                  flags = 0;
1606
1607         switch (buf_f->blf_type) {
1608         case XFS_LI_BUF:
1609                 blkno = buf_f->blf_blkno;
1610                 len = buf_f->blf_len;
1611                 flags = buf_f->blf_flags;
1612                 break;
1613         case XFS_LI_6_1_BUF:
1614         case XFS_LI_5_3_BUF:
1615                 obuf_f = (xfs_buf_log_format_v1_t *)buf_f;
1616                 blkno = (xfs_daddr_t) obuf_f->blf_blkno;
1617                 len = obuf_f->blf_len;
1618                 flags = obuf_f->blf_flags;
1619                 break;
1620         }
1621
1622         /*
1623          * If this isn't a cancel buffer item, then just return.
1624          */
1625         if (!(flags & XFS_BLI_CANCEL))
1626                 return;
1627
1628         /*
1629          * Insert an xfs_buf_cancel record into the hash table of
1630          * them.  If there is already an identical record, bump
1631          * its reference count.
1632          */
1633         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1634                                           XLOG_BC_TABLE_SIZE];
1635         /*
1636          * If the hash bucket is empty then just insert a new record into
1637          * the bucket.
1638          */
1639         if (*bucket == NULL) {
1640                 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1641                                                      KM_SLEEP);
1642                 bcp->bc_blkno = blkno;
1643                 bcp->bc_len = len;
1644                 bcp->bc_refcount = 1;
1645                 bcp->bc_next = NULL;
1646                 *bucket = bcp;
1647                 return;
1648         }
1649
1650         /*
1651          * The hash bucket is not empty, so search for duplicates of our
1652          * record.  If we find one, then just bump its refcount.  If not,
1653          * then add our record at the end of the list.
1654          */
1655         prevp = NULL;
1656         nextp = *bucket;
1657         while (nextp != NULL) {
1658                 if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
1659                         nextp->bc_refcount++;
1660                         return;
1661                 }
1662                 prevp = nextp;
1663                 nextp = nextp->bc_next;
1664         }
1665         ASSERT(prevp != NULL);
1666         bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1667                                              KM_SLEEP);
1668         bcp->bc_blkno = blkno;
1669         bcp->bc_len = len;
1670         bcp->bc_refcount = 1;
1671         bcp->bc_next = NULL;
1672         prevp->bc_next = bcp;
1673 }
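
/*
 * Illustrative sketch (editorial, not part of the original source): how
 * the cancel table behaves across the two passes.  If a block is
 * cancelled twice in the log, pass 1 leaves a single record with
 * bc_refcount == 2, and pass 2 (xlog_check_buffer_cancelled below)
 * drops one reference per cancel item it meets, freeing the record at
 * zero so later re-uses of the block replay normally.  A simplified
 * model of the counting:
 */
#if 0
#include <assert.h>

struct cancel {
	long	blkno;
	int	len;
	int	refcount;
};

int
main(void)
{
	struct cancel c;

	/* pass 1: two cancel items for the same (blkno, len) */
	c.blkno = 100; c.len = 8; c.refcount = 1;	/* first insert */
	c.refcount++;					/* duplicate: bump */
	assert(c.refcount == 2);

	/* pass 2: each cancel item encountered drops one reference */
	c.refcount--;
	c.refcount--;
	assert(c.refcount == 0);	/* record would now be freed */
	return 0;
}
#endif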
1674
1675 /*
1676  * Check to see whether the buffer being recovered has a corresponding
1677  * entry in the buffer cancel record table.  If it does then return 1
1678  * so that it will be cancelled, otherwise return 0.  If the buffer is
1679  * actually a buffer cancel item (XFS_BLI_CANCEL is set), then decrement
1680  * the refcount on the entry in the table and remove it from the table
1681  * if this is the last reference.
1682  *
1683  * We remove the cancel record from the table when we encounter its
1684  * last occurrence in the log so that if the same buffer is re-used
1685  * again after its last cancellation we actually replay the changes
1686  * made at that point.
1687  */
1688 STATIC int
1689 xlog_check_buffer_cancelled(
1690         xlog_t                  *log,
1691         xfs_daddr_t             blkno,
1692         uint                    len,
1693         ushort                  flags)
1694 {
1695         xfs_buf_cancel_t        *bcp;
1696         xfs_buf_cancel_t        *prevp;
1697         xfs_buf_cancel_t        **bucket;
1698
1699         if (log->l_buf_cancel_table == NULL) {
1700                 /*
1701                  * There is nothing in the table built in pass one,
1702                  * so this buffer must not be cancelled.
1703                  */
1704                 ASSERT(!(flags & XFS_BLI_CANCEL));
1705                 return 0;
1706         }
1707
1708         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1709                                           XLOG_BC_TABLE_SIZE];
1710         bcp = *bucket;
1711         if (bcp == NULL) {
1712                 /*
1713                  * There is no corresponding entry in the table built
1714                  * in pass one, so this buffer has not been cancelled.
1715                  */
1716                 ASSERT(!(flags & XFS_BLI_CANCEL));
1717                 return 0;
1718         }
1719
1720         /*
1721          * Search for an entry in the buffer cancel table that
1722          * matches our buffer.
1723          */
1724         prevp = NULL;
1725         while (bcp != NULL) {
1726                 if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
1727                         /*
1728                          * We've got a match, so return 1 so that the
1729                          * recovery of this buffer is cancelled.
1730                          * If this buffer is actually a buffer cancel
1731                          * log item, then decrement the refcount on the
1732                          * one in the table and remove it if this is the
1733                          * last reference.
1734                          */
1735                         if (flags & XFS_BLI_CANCEL) {
1736                                 bcp->bc_refcount--;
1737                                 if (bcp->bc_refcount == 0) {
1738                                         if (prevp == NULL) {
1739                                                 *bucket = bcp->bc_next;
1740                                         } else {
1741                                                 prevp->bc_next = bcp->bc_next;
1742                                         }
1743                                         kmem_free(bcp,
1744                                                   sizeof(xfs_buf_cancel_t));
1745                                 }
1746                         }
1747                         return 1;
1748                 }
1749                 prevp = bcp;
1750                 bcp = bcp->bc_next;
1751         }
1752         /*
1753          * We didn't find a corresponding entry in the table, so
1754          * return 0 so that the buffer is NOT cancelled.
1755          */
1756         ASSERT(!(flags & XFS_BLI_CANCEL));
1757         return 0;
1758 }
1759
1760 STATIC int
1761 xlog_recover_do_buffer_pass2(
1762         xlog_t                  *log,
1763         xfs_buf_log_format_t    *buf_f)
1764 {
1765         xfs_buf_log_format_v1_t *obuf_f;
1766         xfs_daddr_t             blkno = 0;
1767         ushort                  flags = 0;
1768         uint                    len = 0;
1769
1770         switch (buf_f->blf_type) {
1771         case XFS_LI_BUF:
1772                 blkno = buf_f->blf_blkno;
1773                 flags = buf_f->blf_flags;
1774                 len = buf_f->blf_len;
1775                 break;
1776         case XFS_LI_6_1_BUF:
1777         case XFS_LI_5_3_BUF:
1778                 obuf_f = (xfs_buf_log_format_v1_t *)buf_f;
1779                 blkno = (xfs_daddr_t) obuf_f->blf_blkno;
1780                 flags = obuf_f->blf_flags;
1781                 len = obuf_f->blf_len;
1782                 break;
1783         }
1784
1785         return xlog_check_buffer_cancelled(log, blkno, len, flags);
1786 }
1787
1788 /*
1789  * Perform recovery for a buffer full of inodes.  In these buffers,
1790  * the only data which should be recovered is that which corresponds
1791  * to the di_next_unlinked pointers in the on disk inode structures.
1792  * The rest of the data for the inodes is always logged through the
1793  * inodes themselves rather than the inode buffer and is recovered
1794  * in xlog_recover_do_inode_trans().
1795  *
1796  * The only time when buffers full of inodes are fully recovered is
1797  * when the buffer is full of newly allocated inodes.  In this case
1798  * the buffer will not be marked as an inode buffer and so will be
1799  * sent to xlog_recover_do_reg_buffer() below during recovery.
1800  */
1801 STATIC int
1802 xlog_recover_do_inode_buffer(
1803         xfs_mount_t             *mp,
1804         xlog_recover_item_t     *item,
1805         xfs_buf_t               *bp,
1806         xfs_buf_log_format_t    *buf_f)
1807 {
1808         int                     i;
1809         int                     item_index;
1810         int                     bit;
1811         int                     nbits;
1812         int                     reg_buf_offset;
1813         int                     reg_buf_bytes;
1814         int                     next_unlinked_offset;
1815         int                     inodes_per_buf;
1816         xfs_agino_t             *logged_nextp;
1817         xfs_agino_t             *buffer_nextp;
1818         xfs_buf_log_format_v1_t *obuf_f;
1819         unsigned int            *data_map = NULL;
1820         unsigned int            map_size = 0;
1821
1822         switch (buf_f->blf_type) {
1823         case XFS_LI_BUF:
1824                 data_map = buf_f->blf_data_map;
1825                 map_size = buf_f->blf_map_size;
1826                 break;
1827         case XFS_LI_6_1_BUF:
1828         case XFS_LI_5_3_BUF:
1829                 obuf_f = (xfs_buf_log_format_v1_t *)buf_f;
1830                 data_map = obuf_f->blf_data_map;
1831                 map_size = obuf_f->blf_map_size;
1832                 break;
1833         }
1834         /*
1835          * Set the variables corresponding to the current region to
1836          * 0 so that we'll initialize them on the first pass through
1837          * the loop.
1838          */
1839         reg_buf_offset = 0;
1840         reg_buf_bytes = 0;
1841         bit = 0;
1842         nbits = 0;
1843         item_index = 0;
1844         inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1845         for (i = 0; i < inodes_per_buf; i++) {
1846                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1847                         offsetof(xfs_dinode_t, di_next_unlinked);
1848
1849                 while (next_unlinked_offset >=
1850                        (reg_buf_offset + reg_buf_bytes)) {
1851                         /*
1852                          * The next di_next_unlinked field is beyond
1853                          * the current logged region.  Find the next
1854                          * logged region that contains or is beyond
1855                          * the current di_next_unlinked field.
1856                          */
1857                         bit += nbits;
1858                         bit = xfs_next_bit(data_map, map_size, bit);
1859
1860                         /*
1861                          * If there are no more logged regions in the
1862                          * buffer, then we're done.
1863                          */
1864                         if (bit == -1) {
1865                                 return 0;
1866                         }
1867
1868                         nbits = xfs_contig_bits(data_map, map_size,
1869                                                          bit);
1870                         ASSERT(nbits > 0);
1871                         reg_buf_offset = bit << XFS_BLI_SHIFT;
1872                         reg_buf_bytes = nbits << XFS_BLI_SHIFT;
1873                         item_index++;
1874                 }
1875
1876                 /*
1877                  * If the current logged region starts after the current
1878                  * di_next_unlinked field, then move on to the next
1879                  * di_next_unlinked field.
1880                  */
1881                 if (next_unlinked_offset < reg_buf_offset) {
1882                         continue;
1883                 }
1884
1885                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1886                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLI_CHUNK) == 0);
1887                 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1888
1889                 /*
1890                  * The current logged region contains a copy of the
1891                  * current di_next_unlinked field.  Extract its value
1892                  * and copy it to the buffer copy.
1893                  */
1894                 logged_nextp = (xfs_agino_t *)
1895                                ((char *)(item->ri_buf[item_index].i_addr) +
1896                                 (next_unlinked_offset - reg_buf_offset));
1897                 if (unlikely(*logged_nextp == 0)) {
1898                         xfs_fs_cmn_err(CE_ALERT, mp,
1899                                 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
1900                                 item, bp);
1901                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1902                                          XFS_ERRLEVEL_LOW, mp);
1903                         return XFS_ERROR(EFSCORRUPTED);
1904                 }
1905
1906                 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1907                                               next_unlinked_offset);
1908                 INT_SET(*buffer_nextp, ARCH_CONVERT, *logged_nextp);
1909         }
1910
1911         return 0;
1912 }
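
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * offset walked by the loop above is just "inode slot * inode size plus
 * the offset of di_next_unlinked inside the on-disk inode".  With
 * made-up numbers (256-byte inodes, the field at byte 12 of the dinode):
 */
#if 0
#include <stdio.h>

int
main(void)
{
	int inodesize = 256;	/* sb_inodesize, example value */
	int nu_off = 12;	/* offset of di_next_unlinked, made up */
	int i;

	for (i = 0; i < 4; i++)
		printf("inode %d: di_next_unlinked at buffer offset %d\n",
			i, i * inodesize + nu_off);
	return 0;
}
#endif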
1913
1914 /*
1915  * Perform a 'normal' buffer recovery.  Each logged region of the
1916  * buffer should be copied over the corresponding region in the
1917  * given buffer.  The bitmap in the buf log format structure indicates
1918  * where to place the logged data.
1919  */
1920 /*ARGSUSED*/
1921 STATIC void
1922 xlog_recover_do_reg_buffer(
1923         xfs_mount_t             *mp,
1924         xlog_recover_item_t     *item,
1925         xfs_buf_t               *bp,
1926         xfs_buf_log_format_t    *buf_f)
1927 {
1928         int                     i;
1929         int                     bit;
1930         int                     nbits;
1931         xfs_buf_log_format_v1_t *obuf_f;
1932         unsigned int            *data_map = NULL;
1933         unsigned int            map_size = 0;
1934         int                     error;
1935
1936         switch (buf_f->blf_type) {
1937         case XFS_LI_BUF:
1938                 data_map = buf_f->blf_data_map;
1939                 map_size = buf_f->blf_map_size;
1940                 break;
1941         case XFS_LI_6_1_BUF:
1942         case XFS_LI_5_3_BUF:
1943                 obuf_f = (xfs_buf_log_format_v1_t *)buf_f;
1944                 data_map = obuf_f->blf_data_map;
1945                 map_size = obuf_f->blf_map_size;
1946                 break;
1947         }
1948         bit = 0;
1949         i = 1;  /* 0 is the buf format structure */
1950         while (1) {
1951                 bit = xfs_next_bit(data_map, map_size, bit);
1952                 if (bit == -1)
1953                         break;
1954                 nbits = xfs_contig_bits(data_map, map_size, bit);
1955                 ASSERT(nbits > 0);
1956                 ASSERT(item->ri_buf[i].i_addr != NULL);
1957                 ASSERT(item->ri_buf[i].i_len % XFS_BLI_CHUNK == 0);
1958                 ASSERT(XFS_BUF_COUNT(bp) >=
1959                        ((uint)bit << XFS_BLI_SHIFT)+(nbits<<XFS_BLI_SHIFT));
1960
1961                 /*
1962                  * Do a sanity check if this is a dquot buffer. Just checking
1963                  * the first dquot in the buffer should do. XXX This is
1964                  * probably a good thing to do for other buf types also.
1965                  */
1966                 error = 0;
1967                 if (buf_f->blf_flags &
1968                    (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
1969                         error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
1970                                                item->ri_buf[i].i_addr,
1971                                                -1, 0, XFS_QMOPT_DOWARN,
1972                                                "dquot_buf_recover");
1973                 }
1974                 if (!error)
1975                         memcpy(xfs_buf_offset(bp,
1976                                 (uint)bit << XFS_BLI_SHIFT),    /* dest */
1977                                 item->ri_buf[i].i_addr,         /* source */
1978                                 nbits<<XFS_BLI_SHIFT);          /* length */
1979                 i++;
1980                 bit += nbits;
1981         }
1982
1983         /* Shouldn't be any more regions */
1984         ASSERT(i == item->ri_total);
1985 }
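
/*
 * Illustrative sketch (editorial, not part of the original source): each
 * bit in the dirty bitmap covers one XFS_BLI_CHUNK (1 << XFS_BLI_SHIFT)
 * bytes of the buffer, so a run of nbits bits starting at bit maps to
 * the byte range [bit << XFS_BLI_SHIFT, (bit + nbits) << XFS_BLI_SHIFT).
 * Assuming a 128-byte chunk (a shift of 7) for the example:
 */
#if 0
#include <stdio.h>

#define EX_BLI_SHIFT	7	/* stand-in for XFS_BLI_SHIFT */

int
main(void)
{
	int bit = 3, nbits = 2;

	printf("copy %d bytes to buffer offset %d\n",
		nbits << EX_BLI_SHIFT,		/* 256 bytes */
		bit << EX_BLI_SHIFT);		/* at offset 384 */
	return 0;
}
#endif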
1986
1987 /*
1988  * Do some primitive error checking on ondisk dquot data structures.
1989  */
1990 int
1991 xfs_qm_dqcheck(
1992         xfs_disk_dquot_t *ddq,
1993         xfs_dqid_t       id,
1994         uint             type,    /* used only when IO_dorepair is true */
1995         uint             flags,
1996         char             *str)
1997 {
1998         xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
1999         int              errs = 0;
2000
2001         /*
2002          * We can encounter an uninitialized dquot buffer for 2 reasons:
2003          * 1. If we crash while deleting the quotainode(s), and those blks got
2004          *    used for user data. This is because we take the path of regular
2005          *    file deletion; however, the size field of quotainodes is never
2006          *    updated, so all the tricks that we play in itruncate_finish
2007          *    don't quite matter.
2008          *
2009          * 2. We don't play the quota buffers when there's a quotaoff logitem.
2010          *    But the allocation will be replayed so we'll end up with an
2011          *    uninitialized quota block.
2012          *
2013          * This is all fine; things are still consistent, and we haven't lost
2014          * any quota information. Just don't complain about bad dquot blks.
2015          */
2016         if (INT_GET(ddq->d_magic, ARCH_CONVERT) != XFS_DQUOT_MAGIC) {
2017                 if (flags & XFS_QMOPT_DOWARN)
2018                         cmn_err(CE_ALERT,
2019                         "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2020                         str, id,
2021                         INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_MAGIC);
2022                 errs++;
2023         }
2024         if (INT_GET(ddq->d_version, ARCH_CONVERT) != XFS_DQUOT_VERSION) {
2025                 if (flags & XFS_QMOPT_DOWARN)
2026                         cmn_err(CE_ALERT,
2027                         "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2028                         str, id,
2029                         INT_GET(ddq->d_version, ARCH_CONVERT), XFS_DQUOT_VERSION);
2030                 errs++;
2031         }
2032
2033         if (INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_USER &&
2034             INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_PROJ &&
2035             INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_GROUP) {
2036                 if (flags & XFS_QMOPT_DOWARN)
2037                         cmn_err(CE_ALERT,
2038                         "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2039                         str, id, INT_GET(ddq->d_flags, ARCH_CONVERT));
2040                 errs++;
2041         }
2042
2043         if (id != -1 && id != INT_GET(ddq->d_id, ARCH_CONVERT)) {
2044                 if (flags & XFS_QMOPT_DOWARN)
2045                         cmn_err(CE_ALERT,
2046                         "%s : ondisk-dquot 0x%p, ID mismatch: "
2047                         "0x%x expected, found id 0x%x",
2048                         str, ddq, id, INT_GET(ddq->d_id, ARCH_CONVERT));
2049                 errs++;
2050         }
2051
2052         if (!errs && ddq->d_id) {
2053                 if (INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT) &&
2054                     INT_GET(ddq->d_bcount, ARCH_CONVERT) >=
2055                                 INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT)) {
2056                         if (!ddq->d_btimer) {
2057                                 if (flags & XFS_QMOPT_DOWARN)
2058                                         cmn_err(CE_ALERT,
2059                                         "%s : Dquot ID 0x%x (0x%p) "
2060                                         "BLK TIMER NOT STARTED",
2061                                         str, (int)
2062                                         INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
2063                                 errs++;
2064                         }
2065                 }
2066                 if (INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT) &&
2067                     INT_GET(ddq->d_icount, ARCH_CONVERT) >=
2068                                 INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT)) {
2069                         if (!ddq->d_itimer) {
2070                                 if (flags & XFS_QMOPT_DOWARN)
2071                                         cmn_err(CE_ALERT,
2072                                         "%s : Dquot ID 0x%x (0x%p) "
2073                                         "INODE TIMER NOT STARTED",
2074                                         str, (int)
2075                                         INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
2076                                 errs++;
2077                         }
2078                 }
2079                 if (INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT) &&
2080                     INT_GET(ddq->d_rtbcount, ARCH_CONVERT) >=
2081                                 INT_GET(ddq->d_rtb_softlimit, ARCH_CONVERT)) {
2082                         if (!ddq->d_rtbtimer) {
2083                                 if (flags & XFS_QMOPT_DOWARN)
2084                                         cmn_err(CE_ALERT,
2085                                         "%s : Dquot ID 0x%x (0x%p) "
2086                                         "RTBLK TIMER NOT STARTED",
2087                                         str, (int)
2088                                         INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
2089                                 errs++;
2090                         }
2091                 }
2092         }
2093
2094         if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2095                 return errs;
2096
2097         if (flags & XFS_QMOPT_DOWARN)
2098                 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2099
2100         /*
2101          * Typically, a repair is only requested by quotacheck.
2102          */
2103         ASSERT(id != -1);
2104         ASSERT(flags & XFS_QMOPT_DQREPAIR);
2105         memset(d, 0, sizeof(xfs_dqblk_t));
2106         INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC);
2107         INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION);
2108         INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id);
2109         INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type);
2110
2111         return errs;
2112 }
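
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * two ways xfs_qm_dqcheck() gets used.  With XFS_QMOPT_DOWARN alone a
 * nonzero return simply fails the caller:
 *
 *	if (xfs_qm_dqcheck(ddq, id, 0, XFS_QMOPT_DOWARN, "caller"))
 *		return XFS_ERROR(EIO);
 *
 * while quotacheck passes XFS_QMOPT_DQREPAIR with a real id and type, so
 * a bad dquot is re-initialized in place instead of failing:
 *
 *	(void) xfs_qm_dqcheck(ddq, id, type,
 *			      XFS_QMOPT_DQREPAIR | XFS_QMOPT_DOWARN, "qchk");
 */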
2113
2114 /*
2115  * Perform a dquot buffer recovery.
2116  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2117  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2118  * Else, treat it as a regular buffer and do recovery.
2119  */
2120 STATIC void
2121 xlog_recover_do_dquot_buffer(
2122         xfs_mount_t             *mp,
2123         xlog_t                  *log,
2124         xlog_recover_item_t     *item,
2125         xfs_buf_t               *bp,
2126         xfs_buf_log_format_t    *buf_f)
2127 {
2128         uint                    type;
2129
2130         /*
2131          * Filesystems are required to send in quota flags at mount time.
2132          */
2133         if (mp->m_qflags == 0) {
2134                 return;
2135         }
2136
2137         type = 0;
2138         if (buf_f->blf_flags & XFS_BLI_UDQUOT_BUF)
2139                 type |= XFS_DQ_USER;
2140         if (buf_f->blf_flags & XFS_BLI_PDQUOT_BUF)
2141                 type |= XFS_DQ_PROJ;
2142         if (buf_f->blf_flags & XFS_BLI_GDQUOT_BUF)
2143                 type |= XFS_DQ_GROUP;
2144         /*
2145          * If this type of quota was turned off, ignore this buffer.
2146          */
2147         if (log->l_quotaoffs_flag & type)
2148                 return;
2149
2150         xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2151 }
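
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * type mask built above ORs one bit per quota flavour carried by the
 * buffer, and the buffer is skipped when any of those flavours has a
 * QUOTAOFF record noted in log->l_quotaoffs_flag:
 */
#if 0
#include <assert.h>

#define EX_DQ_USER	0x1	/* stand-ins for XFS_DQ_USER etc. */
#define EX_DQ_PROJ	0x2
#define EX_DQ_GROUP	0x4

int
main(void)
{
	int quotaoffs = EX_DQ_GROUP;		/* group quota turned off */
	int type = EX_DQ_USER | EX_DQ_GROUP;	/* buffer holds user+group */

	assert(quotaoffs & type);		/* -> ignore this buffer */
	return 0;
}
#endif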
2152
2153 /*
2154  * This routine replays a modification made to a buffer at runtime.
2155  * There are actually two types of buffer, regular and inode, which
2156  * are handled differently.  Inode buffers are handled differently
2157  * in that we only recover a specific set of data from them, namely
2158  * the inode di_next_unlinked fields.  This is because all other inode
2159  * data is actually logged via inode records and any data we replay
2160  * here which overlaps that may be stale.
2161  *
2162  * When meta-data buffers are freed at run time we log a buffer item
2163  * with the XFS_BLI_CANCEL bit set to indicate that previous copies
2164  * of the buffer in the log should not be replayed at recovery time.
2165  * This is so that if the blocks covered by the buffer are reused for
2166  * file data before we crash we don't end up replaying old, freed
2167  * meta-data into a user's file.
2168  *
2169  * To handle the cancellation of buffer log items, we make two passes
2170  * over the log during recovery.  During the first we build a table of
2171  * those buffers which have been cancelled, and during the second we
2172  * only replay those buffers which do not have corresponding cancel
2173  * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2174  * for more details on the implementation of the table of cancel records.
2175  */
2176 STATIC int
2177 xlog_recover_do_buffer_trans(
2178         xlog_t                  *log,
2179         xlog_recover_item_t     *item,
2180         int                     pass)
2181 {
2182         xfs_buf_log_format_t    *buf_f;
2183         xfs_buf_log_format_v1_t *obuf_f;
2184         xfs_mount_t             *mp;
2185         xfs_buf_t               *bp;
2186         int                     error;
2187         int                     cancel;
2188         xfs_daddr_t             blkno;
2189         int                     len;
2190         ushort                  flags;
2191
2192         buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
2193
2194         if (pass == XLOG_RECOVER_PASS1) {
2195                 /*
2196                  * In this pass we're only looking for buf items
2197                  * with the XFS_BLI_CANCEL bit set.
2198                  */
2199                 xlog_recover_do_buffer_pass1(log, buf_f);
2200                 return 0;
2201         } else {
2202                 /*
2203                  * In this pass we want to recover all the buffers
2204                  * which have not been cancelled and are not
2205                  * cancellation buffers themselves.  The routine
2206                  * we call here will tell us whether or not to
2207                  * continue with the replay of this buffer.
2208                  */
2209                 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2210                 if (cancel) {
2211                         return 0;
2212                 }
2213         }
2214         switch (buf_f->blf_type) {
2215         case XFS_LI_BUF:
2216                 blkno = buf_f->blf_blkno;
2217                 len = buf_f->blf_len;
2218                 flags = buf_f->blf_flags;
2219                 break;
2220         case XFS_LI_6_1_BUF:
2221         case XFS_LI_5_3_BUF:
2222                 obuf_f = (xfs_buf_log_format_v1_t *)buf_f;
2223                 blkno = obuf_f->blf_blkno;
2224                 len = obuf_f->blf_len;
2225                 flags = obuf_f->blf_flags;
2226                 break;
2227         default:
2228                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2229                         "xfs_log_recover: unknown buffer type 0x%x, dev %s",
2230                         buf_f->blf_type, XFS_BUFTARG_NAME(log->l_targ));
2231                 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2232                                  XFS_ERRLEVEL_LOW, log->l_mp);
2233                 return XFS_ERROR(EFSCORRUPTED);
2234         }
2235
2236         mp = log->l_mp;
2237         if (flags & XFS_BLI_INODE_BUF) {
2238                 bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, len,
2239                                                                 XFS_BUF_LOCK);
2240         } else {
2241                 bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
2242         }
2243         if (XFS_BUF_ISERROR(bp)) {
2244                 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2245                                   bp, blkno);
2246                 error = XFS_BUF_GETERROR(bp);
2247                 xfs_buf_relse(bp);
2248                 return error;
2249         }
2250
2251         error = 0;
2252         if (flags & XFS_BLI_INODE_BUF) {
2253                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2254         } else if (flags &
2255                   (XFS_BLI_UDQUOT_BUF|XFS_BLI_PDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
2256                 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2257         } else {
2258                 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2259         }
2260         if (error) {
2261                 xfs_buf_relse(bp);
2262                 return XFS_ERROR(error);
2263         }
2262
2263         /*
2264          * Perform delayed write on the buffer.  Asynchronous writes will be
2265          * slower when taking into account all the buffers to be flushed.
2266          *
2267          * Also make sure that only inode buffers with good sizes stay in
2268          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2269          * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2270          * buffers in the log can be a different size if the log was generated
2271          * by an older kernel using unclustered inode buffers or a newer kernel
2272          * running with a different inode cluster size.  Regardless, if
2273          * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2274          * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2275          * the buffer out of the buffer cache so that the buffer won't
2276          * overlap with future reads of those inodes.
2277          */
2278         if (XFS_DINODE_MAGIC ==
2279             INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) &&
2280             (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2281                         (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2282                 XFS_BUF_STALE(bp);
2283                 error = xfs_bwrite(mp, bp);
2284         } else {
2285                 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2286                        XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2287                 XFS_BUF_SET_FSPRIVATE(bp, mp);
2288                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2289                 xfs_bdwrite(mp, bp);
2290         }
2291
2292         return (error);
2293 }
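
/*
 * Illustrative sketch (editorial, not part of the original source): the
 * two-pass structure this routine assumes.  The recovery driver (not
 * shown in this section) walks the whole log twice and threads the pass
 * number down to routines like this one:
 */
#if 0
enum { PASS1, PASS2 };

static void
recover_log(void)
{
	int pass;

	for (pass = PASS1; pass <= PASS2; pass++) {
		/*
		 * for each buffer item in the log:
		 *   PASS1: note XFS_BLI_CANCEL items in the cancel table
		 *   PASS2: skip cancelled buffers, replay the rest
		 */
	}
}
#endif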
2294
2295 STATIC int
2296 xlog_recover_do_inode_trans(
2297         xlog_t                  *log,
2298         xlog_recover_item_t     *item,
2299         int                     pass)
2300 {
2301         xfs_inode_log_format_t  *in_f;
2302         xfs_mount_t             *mp;
2303         xfs_buf_t               *bp;
2304         xfs_imap_t              imap;
2305         xfs_dinode_t            *dip;
2306         xfs_ino_t               ino;
2307         int                     len;
2308         xfs_caddr_t             src;
2309         xfs_caddr_t             dest;
2310         int                     error;
2311         int                     attr_index;
2312         uint                    fields;
2313         xfs_dinode_core_t       *dicp;
2314
2315         if (pass == XLOG_RECOVER_PASS1) {
2316                 return 0;
2317         }
2318
2319         in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
2320         ino = in_f->ilf_ino;
2321         mp = log->l_mp;
2322         if (ITEM_TYPE(item) == XFS_LI_INODE) {
2323                 imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno;
2324                 imap.im_len = in_f->ilf_len;
2325                 imap.im_boffset = in_f->ilf_boffset;
2326         } else {
2327                 /*
2328                  * It's an old inode format record.  We don't know where
2329                  * its cluster is located on disk, and we can't allow
2330                  * xfs_imap() to figure it out because the inode btrees
2331                  * are not ready to be used.  Therefore do not pass the
2332                  * XFS_IMAP_LOOKUP flag to xfs_imap().  This will give
2333                  * us only the single block in which the inode lives
2334                  * rather than its cluster, so we must make sure to
2335                  * invalidate the buffer when we write it out below.
2336                  */
2337                 imap.im_blkno = 0;
2338                 xfs_imap(log->l_mp, NULL, ino, &imap, 0);
2339         }
2340
2341         /*
2342          * Inode buffers can be freed, look out for it,
2343          * and do not replay the inode.
2344          */
2345         if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0))
2346                 return 0;
2347
2348         bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len,
2349                                                                 XFS_BUF_LOCK);
2350         if (XFS_BUF_ISERROR(bp)) {
2351                 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2352                                   bp, imap.im_blkno);
2353                 error = XFS_BUF_GETERROR(bp);
2354                 xfs_buf_relse(bp);
2355                 return error;
2356         }
2357         error = 0;
2358         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2359         dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
2360
2361         /*
2362          * Make sure the place we're flushing out to really looks
2363          * like an inode!
2364          */
2365         if (unlikely(INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC)) {
2366                 xfs_buf_relse(bp);
2367                 xfs_fs_cmn_err(CE_ALERT, mp,
2368                         "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2369                         dip, bp, ino);
2370                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2371                                  XFS_ERRLEVEL_LOW, mp);
2372                 return XFS_ERROR(EFSCORRUPTED);
2373         }
2374         dicp = (xfs_dinode_core_t *)(item->ri_buf[1].i_addr);
2375         if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2376                 xfs_buf_relse(bp);
2377                 xfs_fs_cmn_err(CE_ALERT, mp,
2378                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2379                         item, ino);
2380                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2381                                  XFS_ERRLEVEL_LOW, mp);
2382                 return XFS_ERROR(EFSCORRUPTED);
2383         }
2384
2385         /* Skip replay when the on disk inode is newer than the log one */
2386         if (dicp->di_flushiter <
2387             INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT)) {
2388                 /*
2389                  * Deal with the wrap case: DI_MAX_FLUSH on disk plus a
2390                  * small value in the log means the counter has wrapped.
2391                  */
2392                 if ((INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT)
2393                                                         == DI_MAX_FLUSH) &&
2394                     (dicp->di_flushiter < (DI_MAX_FLUSH>>1))) {
2395                         /* do nothing */
2396                 } else {
2397                         xfs_buf_relse(bp);
2398                         return 0;
2399                 }
2400         }
2401         /* Take the opportunity to reset the flush iteration count */
2402         dicp->di_flushiter = 0;
2403
2404         if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2405                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2406                     (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2407                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2408                                          XFS_ERRLEVEL_LOW, mp, dicp);
2409                         xfs_buf_relse(bp);
2410                         xfs_fs_cmn_err(CE_ALERT, mp,
2411                                 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2412                                 item, dip, bp, ino);
2413                         return XFS_ERROR(EFSCORRUPTED);
2414                 }
2415         } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2416                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2417                     (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2418                     (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2419                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2420                                              XFS_ERRLEVEL_LOW, mp, dicp);
2421                         xfs_buf_relse(bp);
2422                         xfs_fs_cmn_err(CE_ALERT, mp,
2423                                 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2424                                 item, dip, bp, ino);
2425                         return XFS_ERROR(EFSCORRUPTED);
2426                 }
2427         }
2428         if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2429                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2430                                      XFS_ERRLEVEL_LOW, mp, dicp);
2431                 xfs_buf_relse(bp);
2432                 xfs_fs_cmn_err(CE_ALERT, mp,
2433                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2434                         item, dip, bp, ino,
2435                         dicp->di_nextents + dicp->di_anextents,
2436                         dicp->di_nblocks);
2437                 return XFS_ERROR(EFSCORRUPTED);
2438         }
2439         if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2440                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2441                                      XFS_ERRLEVEL_LOW, mp, dicp);
2442                 xfs_buf_relse(bp);
2443                 xfs_fs_cmn_err(CE_ALERT, mp,
2444                         "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2445                         item, dip, bp, ino, dicp->di_forkoff);
2446                 return XFS_ERROR(EFSCORRUPTED);
2447         }
2448         if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) {
2449                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2450                                      XFS_ERRLEVEL_LOW, mp, dicp);
2451                 xfs_buf_relse(bp);
2452                 xfs_fs_cmn_err(CE_ALERT, mp,
2453                         "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2454                         item->ri_buf[1].i_len, item);
2455                 return XFS_ERROR(EFSCORRUPTED);
2456         }
2457
2458         /* The logged inode core is in in-core format; translate to disk */
2459         xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
2460                               (xfs_dinode_core_t *)item->ri_buf[1].i_addr, -1);
2461
2462         /* the rest is in on-disk format */
2463         if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) {
2464                 memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t),
2465                         item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t),
2466                         item->ri_buf[1].i_len  - sizeof(xfs_dinode_core_t));
2467         }
2468
2469         fields = in_f->ilf_fields;
2470         switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2471         case XFS_ILOG_DEV:
2472                 INT_SET(dip->di_u.di_dev, ARCH_CONVERT, in_f->ilf_u.ilfu_rdev);
2473
2474                 break;
2475         case XFS_ILOG_UUID:
2476                 dip->di_u.di_muuid = in_f->ilf_u.ilfu_uuid;
2477                 break;
2478         }
2479
2480         if (in_f->ilf_size == 2)
2481                 goto write_inode_buffer;
2482         len = item->ri_buf[2].i_len;
2483         src = item->ri_buf[2].i_addr;
2484         ASSERT(in_f->ilf_size <= 4);
2485         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2486         ASSERT(!(fields & XFS_ILOG_DFORK) ||
2487                (len == in_f->ilf_dsize));
2488
2489         switch (fields & XFS_ILOG_DFORK) {
2490         case XFS_ILOG_DDATA:
2491         case XFS_ILOG_DEXT:
2492                 memcpy(&dip->di_u, src, len);
2493                 break;
2494
2495         case XFS_ILOG_DBROOT:
2496                 xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
2497                                  &(dip->di_u.di_bmbt),
2498                                  XFS_DFORK_DSIZE(dip, mp));
2499                 break;
2500
2501         default:
2502                 /*
2503                  * There are no data fork flags set.
2504                  */
2505                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2506                 break;
2507         }
2508
2509         /*
2510          * If we logged any attribute data, recover it.  There may or
2511          * may not have been any other non-core data logged in this
2512          * transaction.
2513          */
2514         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2515                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2516                         attr_index = 3;
2517                 } else {
2518                         attr_index = 2;
2519                 }
2520                 len = item->ri_buf[attr_index].i_len;
2521                 src = item->ri_buf[attr_index].i_addr;
2522                 ASSERT(len == in_f->ilf_asize);
2523
2524                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2525                 case XFS_ILOG_ADATA:
2526                 case XFS_ILOG_AEXT:
2527                         dest = XFS_DFORK_APTR(dip);
2528                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2529                         memcpy(dest, src, len);
2530                         break;
2531
2532                 case XFS_ILOG_ABROOT:
2533                         dest = XFS_DFORK_APTR(dip);
2534                         xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len,
2535                                          (xfs_bmdr_block_t *)dest,
2536                                          XFS_DFORK_ASIZE(dip, mp));
2537                         break;
2538
2539                 default:
2540                         xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2541                         ASSERT(0);
2542                         xfs_buf_relse(bp);
2543                         return XFS_ERROR(EIO);
2544                 }
2545         }
2546
2547 write_inode_buffer:
2548         if (ITEM_TYPE(item) == XFS_LI_INODE) {
2549                 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2550                        XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2551                 XFS_BUF_SET_FSPRIVATE(bp, mp);
2552                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2553                 xfs_bdwrite(mp, bp);
2554         } else {
2555                 XFS_BUF_STALE(bp);
2556                 error = xfs_bwrite(mp, bp);
2557         }
2558
2559         return (error);
2560 }
2561
2562 /*
2563  * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2564  * structure, so that we know not to do any dquot item or dquot buffer
2565  * recovery of that type.
2566  */
2567 STATIC int
2568 xlog_recover_do_quotaoff_trans(
2569         xlog_t                  *log,
2570         xlog_recover_item_t     *item,
2571         int                     pass)
2572 {
2573         xfs_qoff_logformat_t    *qoff_f;
2574
2575         if (pass == XLOG_RECOVER_PASS2) {
2576                 return (0);
2577         }
2578
2579         qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
2580         ASSERT(qoff_f);
2581
2582         /*
2583          * The logitem format's flag tells us if this was user quotaoff,
2584          * group quotaoff or both.
2585          */
2586         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2587                 log->l_quotaoffs_flag |= XFS_DQ_USER;
2588         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2589                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2590
2591         return (0);
2592 }
2593
2594 /*
2595  * Recover a dquot record
2596  */
2597 STATIC int
2598 xlog_recover_do_dquot_trans(
2599         xlog_t                  *log,
2600         xlog_recover_item_t     *item,
2601         int                     pass)
2602 {
2603         xfs_mount_t             *mp;
2604         xfs_buf_t               *bp;
2605         struct xfs_disk_dquot   *ddq, *recddq;
2606         int                     error;
2607         xfs_dq_logformat_t      *dq_f;
2608         uint                    type;
2609
2610         if (pass == XLOG_RECOVER_PASS1) {
2611                 return 0;
2612         }
2613         mp = log->l_mp;
2614
2615         /*
2616          * Filesystems are required to send in quota flags at mount time.
2617          */
2618         if (mp->m_qflags == 0)
2619                 return (0);
2620
2621         recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
2622         ASSERT(recddq);
2623         /*
2624          * If this type of quota was turned off, ignore this record.
2625          */
2626         type = INT_GET(recddq->d_flags, ARCH_CONVERT) &
2627                         (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2628         ASSERT(type);
2629         if (log->l_quotaoffs_flag & type)
2630                 return (0);
2631
2632         /*
2633          * At this point we know that quota was _not_ turned off.
2634          * Since the mount flags do not indicate otherwise, this
2635          * must mean that quota is on, and the dquot needs to be replayed.
2636          * Remember that we may not have fully recovered the superblock yet,
2637          * so we can't do the usual trick of looking at the SB quota bits.
2638          *
2639          * The other possibility, of course, is that the quota subsystem was
2640          * removed since the last mount - ENOSYS.
2641          */
2642         dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
2643         ASSERT(dq_f);
2644         if ((error = xfs_qm_dqcheck(recddq,
2645                            dq_f->qlf_id,
2646                            0, XFS_QMOPT_DOWARN,
2647                            "xlog_recover_do_dquot_trans (log copy)"))) {
2648                 return XFS_ERROR(EIO);
2649         }
2650         ASSERT(dq_f->qlf_len == 1);
2651
2652         error = xfs_read_buf(mp, mp->m_ddev_targp,
2653                              dq_f->qlf_blkno,
2654                              XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2655                              0, &bp);
2656         if (error) {
2657                 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2658                                   bp, dq_f->qlf_blkno);
2659                 return error;
2660         }
2661         ASSERT(bp);
2662         ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2663
2664         /*
2665          * At least the magic num portion should be on disk because this
2666          * was among a chunk of dquots created earlier, and we did some
2667          * minimal initialization then.
2668          */
2669         if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2670                            "xlog_recover_do_dquot_trans")) {
2671                 xfs_buf_relse(bp);
2672                 return XFS_ERROR(EIO);
2673         }
2674
2675         memcpy(ddq, recddq, item->ri_buf[1].i_len);
2676
2677         ASSERT(dq_f->qlf_size == 2);
2678         ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
2679                XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp);
2680         XFS_BUF_SET_FSPRIVATE(bp, mp);
2681         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2682         xfs_bdwrite(mp, bp);
2683
2684         return (0);
2685 }
2686
2687 /*
2688  * This routine is called to create an in-core extent free intent
2689  * item from the efi format structure which was logged on disk.
2690  * It allocates an in-core efi, copies the extents from the format
2691  * structure into it, and adds the efi to the AIL with the given
2692  * LSN.
2693  */
2694 STATIC void
2695 xlog_recover_do_efi_trans(
2696         xlog_t                  *log,
2697         xlog_recover_item_t     *item,
2698         xfs_lsn_t               lsn,
2699         int                     pass)
2700 {
2701         xfs_mount_t             *mp;
2702         xfs_efi_log_item_t      *efip;
2703         xfs_efi_log_format_t    *efi_formatp;
2704         SPLDECL(s);
2705
2706         if (pass == XLOG_RECOVER_PASS1) {
2707                 return;
2708         }
2709
2710         efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
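        /*
         * xfs_efi_log_format_t embeds one xfs_extent_t, hence the
         * (efi_nextents - 1) in the size calculations below.
         */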
2711         ASSERT(item->ri_buf[0].i_len ==
2712                (sizeof(xfs_efi_log_format_t) +
2713                 ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t))));
2714
2715         mp = log->l_mp;
2716         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2717         memcpy((char *)&(efip->efi_format), (char *)efi_formatp,
2718               sizeof(xfs_efi_log_format_t) +
2719               ((efi_formatp->efi_nextents - 1) * sizeof(xfs_extent_t)));
2720         efip->efi_next_extent = efi_formatp->efi_nextents;
2721         efip->efi_flags |= XFS_EFI_COMMITTED;
2722
2723         AIL_LOCK(mp,s);
2724         /*
2725          * xfs_trans_update_ail() drops the AIL lock.
2726          */
2727         xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s);
2728 }
2729
2730
2731 /*
2732  * This routine is called when an efd format structure is found in
2733  * a committed transaction in the log.  Its purpose is to cancel
2734  * the corresponding efi if it was still in the log.  To do this
2735  * it searches the AIL for the efi with an id equal to that in the
2736  * efd format structure.  If we find it, we remove the efi from the
2737  * AIL and free it.
2738  */
2739 STATIC void
2740 xlog_recover_do_efd_trans(
2741         xlog_t                  *log,
2742         xlog_recover_item_t     *item,
2743         int                     pass)
2744 {
2745         xfs_mount_t             *mp;
2746         xfs_efd_log_format_t    *efd_formatp;
2747         xfs_efi_log_item_t      *efip = NULL;
2748         xfs_log_item_t          *lip;
2749         int                     gen;
2750         __uint64_t              efi_id;
2751         SPLDECL(s);
2752
2753         if (pass == XLOG_RECOVER_PASS1) {
2754                 return;
2755         }
2756
2757         efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
2758         ASSERT(item->ri_buf[0].i_len ==
2759                (sizeof(xfs_efd_log_format_t) +
2760                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_t))));
2761         efi_id = efd_formatp->efd_efi_id;
2762
2763         /*
2764          * Search for the efi with the id in the efd format structure
2765          * in the AIL.
2766          */
2767         mp = log->l_mp;
2768         AIL_LOCK(mp,s);
2769         lip = xfs_trans_first_ail(mp, &gen);
2770         while (lip != NULL) {
2771                 if (lip->li_type == XFS_LI_EFI) {
2772                         efip = (xfs_efi_log_item_t *)lip;
2773                         if (efip->efi_format.efi_id == efi_id) {
2774                                 /*
2775                                  * xfs_trans_delete_ail() drops the
2776                                  * AIL lock.
2777                                  */
2778                                 xfs_trans_delete_ail(mp, lip, s);
2779                                 break;
2780                         }
2781                 }
2782                 lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
2783         }
2784
2785         /*
2786          * If we found it, then free it up.  If it wasn't there, it
2787          * must have been overwritten in the log.  Oh well.
2788          */
2789         if (lip != NULL) {
2790                 xfs_efi_item_free(efip);
2791         } else {
2792                 AIL_UNLOCK(mp, s);
2793         }
2794 }
2795
2796 /*
2797  * Perform the transaction
2798  *
2799  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2800  * EFIs and EFDs are queued up by adding entries for them to the AIL.
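 *
 * Dispatch map (a summary of the item type checks below): buffer and
 * inode items, in both the current and the old 6.1/5.3 log formats,
 * are replayed against the filesystem immediately; EFI, EFD, dquot
 * and quotaoff items are handed to the helper routines above.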
2801  */
2802 STATIC int
2803 xlog_recover_do_trans(
2804         xlog_t                  *log,
2805         xlog_recover_t          *trans,
2806         int                     pass)
2807 {
2808         int                     error = 0;
2809         xlog_recover_item_t     *item, *first_item;
2810
2811         if ((error = xlog_recover_reorder_trans(log, trans)))
2812                 return error;
2813         first_item = item = trans->r_itemq;
2814         do {
2815                 /*
2816                  * We don't need to worry about the block number being
2817                  * truncated in > 1 TB buffers because, in user-land,
2818                  * we're now n32 or 64-bit, so xfs_daddr_t is 64 bits and
2819                  * the blknos will get through the user-mode buffer
2820                  * cache properly.  The only bad case is o32 kernels,
2821                  * where xfs_daddr_t is 32 bits, but mount will warn us
2822                  * off a > 1 TB filesystem before we get here.
2823                  */
2824                 if ((ITEM_TYPE(item) == XFS_LI_BUF) ||
2825                     (ITEM_TYPE(item) == XFS_LI_6_1_BUF) ||
2826                     (ITEM_TYPE(item) == XFS_LI_5_3_BUF)) {
2827                         if  ((error = xlog_recover_do_buffer_trans(log, item,
2828                                                                  pass)))
2829                                 break;
2830                 } else if ((ITEM_TYPE(item) == XFS_LI_INODE) ||
2831                            (ITEM_TYPE(item) == XFS_LI_6_1_INODE) ||
2832                            (ITEM_TYPE(item) == XFS_LI_5_3_INODE)) {
2833                         if ((error = xlog_recover_do_inode_trans(log, item,
2834                                                                 pass)))
2835                                 break;
2836                 } else if (ITEM_TYPE(item) == XFS_LI_EFI) {
2837                         xlog_recover_do_efi_trans(log, item, trans->r_lsn,
2838                                                   pass);
2839                 } else if (ITEM_TYPE(item) == XFS_LI_EFD) {
2840                         xlog_recover_do_efd_trans(log, item, pass);
2841                 } else if (ITEM_TYPE(item) == XFS_LI_DQUOT) {
2842                         if ((error = xlog_recover_do_dquot_trans(log, item,
2843                                                                    pass)))
2844                                         break;
2845                 } else if ((ITEM_TYPE(item) == XFS_LI_QUOTAOFF)) {
2846                         if ((error = xlog_recover_do_quotaoff_trans(log, item,
2847                                                                    pass)))
2848                                         break;
2849                 } else {
2850                         xlog_warn("XFS: xlog_recover_do_trans: unknown item type");
2851                         ASSERT(0);
2852                         error = XFS_ERROR(EIO);
2853                         break;
2854                 }
2855                 item = item->ri_next;
2856         } while (first_item != item);
2857
2858         return error;
2859 }
2860
2861 /*
2862  * Free up any resources allocated by the transaction
2863  *
2864  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2865  */
2866 STATIC void
2867 xlog_recover_free_trans(
2868         xlog_recover_t          *trans)
2869 {
2870         xlog_recover_item_t     *first_item, *item, *free_item;
2871         int                     i;
2872
2873         item = first_item = trans->r_itemq;
2874         do {
2875                 free_item = item;
2876                 item = item->ri_next;
2877                 /* Free the regions in the item. */
2878                 for (i = 0; i < free_item->ri_cnt; i++) {
2879                         kmem_free(free_item->ri_buf[i].i_addr,
2880                                   free_item->ri_buf[i].i_len);
2881                 }
2882                 /* Free the item itself */
2883                 kmem_free(free_item->ri_buf,
2884                           (free_item->ri_total * sizeof(xfs_log_iovec_t)));
2885                 kmem_free(free_item, sizeof(xlog_recover_item_t));
2886         } while (first_item != item);
2887         /* Free the transaction recover structure */
2888         kmem_free(trans, sizeof(xlog_recover_t));
2889 }
2890
2891 STATIC int
2892 xlog_recover_commit_trans(
2893         xlog_t                  *log,
2894         xlog_recover_t          **q,
2895         xlog_recover_t          *trans,
2896         int                     pass)
2897 {
2898         int                     error;
2899
2900         if ((error = xlog_recover_unlink_tid(q, trans)))
2901                 return error;
2902         if ((error = xlog_recover_do_trans(log, trans, pass)))
2903                 return error;
2904         xlog_recover_free_trans(trans);                 /* no error */
2905         return 0;
2906 }
2907
2908 STATIC int
2909 xlog_recover_unmount_trans(
2910         xlog_recover_t          *trans)
2911 {
2912         /* Do nothing now */
2913         xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2914         return 0;
2915 }
2916
2917 /*
2918  * There are two valid states of the r_state field.  0 indicates that the
2919  * transaction structure is in a normal state.  We have either seen the
2920  * start of the transaction or the last operation we added was not a partial
2921  * operation.  If the last operation we added to the transaction was a
2922  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2923  *
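 * Roughly (a sketch of the reassembly): an op header flagged
 * XLOG_START_TRANS creates a new xlog_recover_t hashed on its tid;
 * ops with XLOG_CONTINUE_TRANS (or no flag) have their regions added
 * by xlog_recover_add_to_trans(); an op flagged XLOG_WAS_CONT_TRANS
 * carries the tail of a region split across log records and is glued
 * on by xlog_recover_add_to_cont_trans(); XLOG_COMMIT_TRANS replays
 * and then frees the whole transaction.
 *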
2924  * NOTE: skip LRs with 0 data length.
2925  */
2926 STATIC int
2927 xlog_recover_process_data(
2928         xlog_t                  *log,
2929         xlog_recover_t          *rhash[],
2930         xlog_rec_header_t       *rhead,
2931         xfs_caddr_t             dp,
2932         int                     pass)
2933 {
2934         xfs_caddr_t             lp;
2935         int                     num_logops;
2936         xlog_op_header_t        *ohead;
2937         xlog_recover_t          *trans;
2938         xlog_tid_t              tid;
2939         int                     error;
2940         unsigned long           hash;
2941         uint                    flags;
2942
2943         lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT);
2944         num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT);
2945
2946         /* check the log format matches our own - else we can't recover */
2947         if (xlog_header_check_recover(log->l_mp, rhead))
2948                 return (XFS_ERROR(EIO));
2949
2950         while ((dp < lp) && num_logops) {
2951                 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2952                 ohead = (xlog_op_header_t *)dp;
2953                 dp += sizeof(xlog_op_header_t);
2954                 if (ohead->oh_clientid != XFS_TRANSACTION &&
2955                     ohead->oh_clientid != XFS_LOG) {
2956                         xlog_warn(
2957                 "XFS: xlog_recover_process_data: bad clientid");
2958                         ASSERT(0);
2959                         return (XFS_ERROR(EIO));
2960                 }
2961                 tid = INT_GET(ohead->oh_tid, ARCH_CONVERT);
2962                 hash = XLOG_RHASH(tid);
2963                 trans = xlog_recover_find_tid(rhash[hash], tid);
2964                 if (trans == NULL) {               /* not found; add new tid */
2965                         if (ohead->oh_flags & XLOG_START_TRANS)
2966                                 xlog_recover_new_tid(&rhash[hash], tid,
2967                                         INT_GET(rhead->h_lsn, ARCH_CONVERT));
2968                 } else {
2969                         ASSERT(dp+INT_GET(ohead->oh_len, ARCH_CONVERT) <= lp);
2970                         flags = ohead->oh_flags & ~XLOG_END_TRANS;
2971                         if (flags & XLOG_WAS_CONT_TRANS)
2972                                 flags &= ~XLOG_CONTINUE_TRANS;
2973                         switch (flags) {
2974                         case XLOG_COMMIT_TRANS:
2975                                 error = xlog_recover_commit_trans(log,
2976                                                 &rhash[hash], trans, pass);
2977                                 break;
2978                         case XLOG_UNMOUNT_TRANS:
2979                                 error = xlog_recover_unmount_trans(trans);
2980                                 break;
2981                         case XLOG_WAS_CONT_TRANS:
2982                                 error = xlog_recover_add_to_cont_trans(trans,
2983                                                 dp, INT_GET(ohead->oh_len,
2984                                                         ARCH_CONVERT));
2985                                 break;
2986                         case XLOG_START_TRANS:
2987                                 xlog_warn(
2988                         "XFS: xlog_recover_process_data: bad transaction");
2989                                 ASSERT(0);
2990                                 error = XFS_ERROR(EIO);
2991                                 break;
2992                         case 0:
2993                         case XLOG_CONTINUE_TRANS:
2994                                 error = xlog_recover_add_to_trans(trans,
2995                                                 dp, INT_GET(ohead->oh_len,
2996                                                         ARCH_CONVERT));
2997                                 break;
2998                         default:
2999                                 xlog_warn(
3000                         "XFS: xlog_recover_process_data: bad flag");
3001                                 ASSERT(0);
3002                                 error = XFS_ERROR(EIO);
3003                                 break;
3004                         }
3005                         if (error)
3006                                 return error;
3007                 }
3008                 dp += INT_GET(ohead->oh_len, ARCH_CONVERT);
3009                 num_logops--;
3010         }
3011         return 0;
3012 }
3013
3014 /*
3015  * Process an extent free intent item that was recovered from
3016  * the log.  We need to free the extents that it describes.
3017  */
3018 STATIC void
3019 xlog_recover_process_efi(
3020         xfs_mount_t             *mp,
3021         xfs_efi_log_item_t      *efip)
3022 {
3023         xfs_efd_log_item_t      *efdp;
3024         xfs_trans_t             *tp;
3025         int                     i;
3026         xfs_extent_t            *extp;
3027         xfs_fsblock_t           startblock_fsb;
3028
3029         ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
3030
3031         /*
3032          * First check the validity of the extents described by the
3033          * EFI.  If any are bad, then assume that all are bad and
3034          * just toss the EFI.
3035          */
3036         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3037                 extp = &(efip->efi_format.efi_extents[i]);
3038                 startblock_fsb = XFS_BB_TO_FSB(mp,
3039                                    XFS_FSB_TO_DADDR(mp, extp->ext_start));
3040                 if ((startblock_fsb == 0) ||
3041                     (extp->ext_len == 0) ||
3042                     (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3043                     (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3044                         /*
3045                          * This will pull the EFI from the AIL and
3046                          * free the memory associated with it.
3047                          */
3048                         xfs_efi_release(efip, efip->efi_format.efi_nextents);
3049                         return;
3050                 }
3051         }
3052
3053         tp = xfs_trans_alloc(mp, 0);
3054         xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3055         efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3056
3057         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3058                 extp = &(efip->efi_format.efi_extents[i]);
3059                 xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3060                 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3061                                          extp->ext_len);
3062         }
3063
3064         efip->efi_flags |= XFS_EFI_RECOVERED;
3065         xfs_trans_commit(tp, 0, NULL);
3066 }
3067
3068 /*
3069  * Verify that once we've encountered something other than an EFI
3070  * in the AIL, there are no more EFIs in the AIL.
3071  */
3072 #if defined(DEBUG)
3073 STATIC void
3074 xlog_recover_check_ail(
3075         xfs_mount_t             *mp,
3076         xfs_log_item_t          *lip,
3077         int                     gen)
3078 {
3079         int                     orig_gen = gen;
3080
3081         do {
3082                 ASSERT(lip->li_type != XFS_LI_EFI);
3083                 lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
3084                 /*
3085                  * The check will be bogus if we restart from the
3086                  * beginning of the AIL, so ASSERT that we don't.
3087                  * We never should since we're holding the AIL lock
3088                  * the entire time.
3089                  */
3090                 ASSERT(gen == orig_gen);
3091         } while (lip != NULL);
3092 }
3093 #endif  /* DEBUG */
3094
3095 /*
3096  * When this is called, all of the EFIs which did not have
3097  * corresponding EFDs should be in the AIL.  What we do now
3098  * is free the extents associated with each one.
3099  *
3100  * Since we process the EFIs in normal transactions, they
3101  * will be removed at some point after the commit.  This prevents
3102  * us from just walking down the list processing each one.
3103  * We'll use a flag in the EFI to skip those that we've already
3104  * processed and use the AIL iteration mechanism's generation
3105  * count to try to speed this up at least a bit.
3106  *
3107  * When we start, we know that the EFIs are the only things in
3108  * the AIL.  As we process them, however, other items are added
3109  * to the AIL.  Since everything added to the AIL must come after
3110  * everything already in the AIL, we stop processing as soon as
3111  * we see something other than an EFI in the AIL.
3112  */
3113 STATIC void
3114 xlog_recover_process_efis(
3115         xlog_t                  *log)
3116 {
3117         xfs_log_item_t          *lip;
3118         xfs_efi_log_item_t      *efip;
3119         int                     gen;
3120         xfs_mount_t             *mp;
3121         SPLDECL(s);
3122
3123         mp = log->l_mp;
3124         AIL_LOCK(mp,s);
3125
3126         lip = xfs_trans_first_ail(mp, &gen);
3127         while (lip != NULL) {
3128                 /*
3129                  * We're done when we see something other than an EFI.
3130                  */
3131                 if (lip->li_type != XFS_LI_EFI) {
3132                         xlog_recover_check_ail(mp, lip, gen);
3133                         break;
3134                 }
3135
3136                 /*
3137                  * Skip EFIs that we've already processed.
3138                  */
3139                 efip = (xfs_efi_log_item_t *)lip;
3140                 if (efip->efi_flags & XFS_EFI_RECOVERED) {
3141                         lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
3142                         continue;
3143                 }
3144
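                /*
                 * Drop the AIL lock across the extent frees, since
                 * xlog_recover_process_efi() commits a transaction and
                 * may sleep.  The generation count handed to
                 * xfs_trans_next_ail() lets the traversal cope if the
                 * AIL changed while we were unlocked.
                 */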
3145                 AIL_UNLOCK(mp, s);
3146                 xlog_recover_process_efi(mp, efip);
3147                 AIL_LOCK(mp,s);
3148                 lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
3149         }
3150         AIL_UNLOCK(mp, s);
3151 }
3152
3153 /*
3154  * This routine performs a transaction to null out a bad inode pointer
3155  * in an agi unlinked inode hash bucket.
3156  */
3157 STATIC void
3158 xlog_recover_clear_agi_bucket(
3159         xfs_mount_t     *mp,
3160         xfs_agnumber_t  agno,
3161         int             bucket)
3162 {
3163         xfs_trans_t     *tp;
3164         xfs_agi_t       *agi;
3165         xfs_buf_t       *agibp;
3166         int             offset;
3167         int             error;
3168
3169         tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3170         xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0);
3171
3172         error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
3173                                    XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
3174                                    XFS_FSS_TO_BB(mp, 1), 0, &agibp);
3175         if (error) {
3176                 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3177                 return;
3178         }
3179
3180         agi = XFS_BUF_TO_AGI(agibp);
3181         if (INT_GET(agi->agi_magicnum, ARCH_CONVERT) != XFS_AGI_MAGIC) {
3182                 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3183                 return;
3184         }
3185         ASSERT(INT_GET(agi->agi_magicnum, ARCH_CONVERT) == XFS_AGI_MAGIC);
3186
3187         INT_SET(agi->agi_unlinked[bucket], ARCH_CONVERT, NULLAGINO);
3188         offset = offsetof(xfs_agi_t, agi_unlinked) +
3189                  (sizeof(xfs_agino_t) * bucket);
3190         xfs_trans_log_buf(tp, agibp, offset,
3191                           (offset + sizeof(xfs_agino_t) - 1));
3192
3193         (void) xfs_trans_commit(tp, 0, NULL);
3194 }
3195
3196 /*
3197  * xlog_iunlink_recover
3198  *
3199  * This is called during recovery to process any inodes which
3200  * were unlinked but not yet freed when the system crashed.  These
3201  * inodes will be on the lists in the AGI blocks.  What we do
3202  * here is scan all the AGIs and fully truncate and free any
3203  * inodes found on the lists.  Each inode is removed from the
3204  * lists when it has been fully truncated and is freed.  The
3205  * freeing of the inode and its removal from the list must be
3206  * atomic.
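 *
 * Each AGI carries XFS_AGI_UNLINKED_BUCKETS list heads; the lists are
 * singly linked through the di_next_unlinked field of the on-disk
 * inodes and are terminated by NULLAGINO.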
3207  */
3208 void
3209 xlog_recover_process_iunlinks(
3210         xlog_t          *log)
3211 {
3212         xfs_mount_t     *mp;
3213         xfs_agnumber_t  agno;
3214         xfs_agi_t       *agi;
3215         xfs_buf_t       *agibp;
3216         xfs_buf_t       *ibp;
3217         xfs_dinode_t    *dip;
3218         xfs_inode_t     *ip;
3219         xfs_agino_t     agino;
3220         xfs_ino_t       ino;
3221         int             bucket;
3222         int             error;
3223         uint            mp_dmevmask;
3224
3225         mp = log->l_mp;
3226
3227         /*
3228          * Prevent any DMAPI event from being sent while in this function.
3229          */
3230         mp_dmevmask = mp->m_dmevmask;
3231         mp->m_dmevmask = 0;
3232
3233         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3234                 /*
3235                  * Find the agi for this ag.
3236                  */
3237                 agibp = xfs_buf_read(mp->m_ddev_targp,
3238                                 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
3239                                 XFS_FSS_TO_BB(mp, 1), 0);
3240                 if (XFS_BUF_ISERROR(agibp)) {
3241                         xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)",
3242                                 log->l_mp, agibp,
3243                                 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)));
3244                 }
3245                 agi = XFS_BUF_TO_AGI(agibp);
3246                 ASSERT(XFS_AGI_MAGIC ==
3247                         INT_GET(agi->agi_magicnum, ARCH_CONVERT));
3248
3249                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3250
3251                         agino = INT_GET(agi->agi_unlinked[bucket], ARCH_CONVERT);
3252                         while (agino != NULLAGINO) {
3253
3254                                 /*
3255                                  * Release the agi buffer so that it can
3256                                  * be acquired in the normal course of the
3257                                  * transaction to truncate and free the inode.
3258                                  */
3259                                 xfs_buf_relse(agibp);
3260
3261                                 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3262                                 error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
3263                                 ASSERT(error || (ip != NULL));
3264
3265                                 if (!error) {
3266                                         /*
3267                                          * Get the on disk inode to find the
3268                                          * next inode in the bucket.
3269                                          */
3270                                         error = xfs_itobp(mp, NULL, ip, &dip,
3271                                                         &ibp, 0);
3272                                         ASSERT(error || (dip != NULL));
3273                                 }
3274
3275                                 if (!error) {
3276                                         ASSERT(ip->i_d.di_nlink == 0);
3277
3278                                         /* setup for the next pass */
3279                                         agino = INT_GET(dip->di_next_unlinked,
3280                                                         ARCH_CONVERT);
3281                                         xfs_buf_relse(ibp);
3282                                         /*
3283                                          * Prevent any DMAPI event from
3284                                          * being sent when the
3285                                          * reference on the inode is
3286                                          * dropped.
3287                                          */
3288                                         ip->i_d.di_dmevmask = 0;
3289
3290                                         /*
3291                                          * If this is a new inode, handle
3292                                          * it specially.  Otherwise,
3293                                          * just drop our reference to the
3294                                          * inode.  If there are no
3295                                          * other references, this will
3296                                          * send the inode to
3297                                          * xfs_inactive() which will
3298                                          * truncate the file and free
3299                                          * the inode.
3300                                          */
3301                                         if (ip->i_d.di_mode == 0)
3302                                                 xfs_iput_new(ip, 0);
3303                                         else
3304                                                 VN_RELE(XFS_ITOV(ip));
3305                                 } else {
3306                                         /*
3307                                          * We can't read in the inode
3308                                          * this bucket points to, or
3309                                          * this inode is messed up.  Just
3310                                          * ditch this bucket of inodes.  We
3311                                          * will lose some inodes and space,
3312                                          * but at least we won't hang.  Call
3313                                          * xlog_recover_clear_agi_bucket()
3314                                          * to perform a transaction to clear
3315                                          * the inode pointer in the bucket.
3316                                          */
3317                                         xlog_recover_clear_agi_bucket(mp, agno,
3318                                                         bucket);
3319
3320                                         agino = NULLAGINO;
3321                                 }
3322
3323                                 /*
3324                                  * Reacquire the agi buffer and continue around
3325                                  * the loop.
3326                                  */
3327                                 agibp = xfs_buf_read(mp->m_ddev_targp,
3328                                                 XFS_AG_DADDR(mp, agno,
3329                                                         XFS_AGI_DADDR(mp)),
3330                                                 XFS_FSS_TO_BB(mp, 1), 0);
3331                                 if (XFS_BUF_ISERROR(agibp)) {
3332                                         xfs_ioerror_alert(
3333                                 "xlog_recover_process_iunlinks(#2)",
3334                                                 log->l_mp, agibp,
3335                                                 XFS_AG_DADDR(mp, agno,
3336                                                         XFS_AGI_DADDR(mp)));
3337                                 }
3338                                 agi = XFS_BUF_TO_AGI(agibp);
3339                                 ASSERT(XFS_AGI_MAGIC == INT_GET(
3340                                         agi->agi_magicnum, ARCH_CONVERT));
3341                         }
3342                 }
3343
3344                 /*
3345                  * Release the buffer for the current agi so we can
3346                  * go on to the next one.
3347                  */
3348                 xfs_buf_relse(agibp);
3349         }
3350
3351         mp->m_dmevmask = mp_dmevmask;
3352 }
3353
3354
3355 #ifdef DEBUG
3356 STATIC void
3357 xlog_pack_data_checksum(
3358         xlog_t          *log,
3359         xlog_in_core_t  *iclog,
3360         int             size)
3361 {
3362         int             i;
3363         uint            *up;
3364         uint            chksum = 0;
3365
3366         up = (uint *)iclog->ic_datap;
3367         /* divide length by 4 to get # words */
3368         for (i = 0; i < (size >> 2); i++) {
3369                 chksum ^= INT_GET(*up, ARCH_CONVERT);
3370                 up++;
3371         }
3372         INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum);
3373 }
3374 #else
3375 #define xlog_pack_data_checksum(log, iclog, size)
3376 #endif
3377
3378 /*
3379  * Stamp cycle number in every block
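 *
 * This is the torn-write detection scheme: the first word of each
 * 512 byte basic block of the record is overwritten with the record's
 * cycle number, and the displaced words are saved in h_cycle_data[]
 * (v2 logs spill into the extended headers when a record covers more
 * blocks than one header can describe).  For example (a sketch), a
 * two block record written in cycle 7:
 *
 *   before:  | w0 .... | w128 .... |    h_cycle_data = { ?, ? }
 *   after:   |  7 .... |    7 .... |    h_cycle_data = { w0, w128 }
 *
 * xlog_unpack_data() performs the inverse at recovery time, and head
 * searching treats a block whose first word is not the expected cycle
 * number as evidence of an incomplete write.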
3380  */
3381 void
3382 xlog_pack_data(
3383         xlog_t                  *log,
3384         xlog_in_core_t          *iclog,
3385         int                     roundoff)
3386 {
3387         int                     i, j, k;
3388         int                     size = iclog->ic_offset + roundoff;
3389         uint                    cycle_lsn;
3390         xfs_caddr_t             dp;
3391         xlog_in_core_2_t        *xhdr;
3392
3393         xlog_pack_data_checksum(log, iclog, size);
3394
3395         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3396
3397         dp = iclog->ic_datap;
3398         for (i = 0; i < BTOBB(size) &&
3399                 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3400                 iclog->ic_header.h_cycle_data[i] = *(uint *)dp;
3401                 *(uint *)dp = cycle_lsn;
3402                 dp += BBSIZE;
3403         }
3404
3405         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3406                 xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
3407                 for ( ; i < BTOBB(size); i++) {
3408                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3409                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3410                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp;
3411                         *(uint *)dp = cycle_lsn;
3412                         dp += BBSIZE;
3413                 }
3414
3415                 for (i = 1; i < log->l_iclog_heads; i++) {
3416                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3417                 }
3418         }
3419 }
3420
3421 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
3422 STATIC void
3423 xlog_unpack_data_checksum(
3424         xlog_rec_header_t       *rhead,
3425         xfs_caddr_t             dp,
3426         xlog_t                  *log)
3427 {
3428         uint                    *up = (uint *)dp;
3429         uint                    chksum = 0;
3430         int                     i;
3431
3432         /* divide length by 4 to get # words */
3433         for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) {
3434                 chksum ^= INT_GET(*up, ARCH_CONVERT);
3435                 up++;
3436         }
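        /*
         * A mismatch here is only advisory: non-DEBUG kernels write the
         * log without setting h_chksum (xlog_pack_data_checksum()
         * compiles away), so just warn, and use XLOG_CHKSUM_MISMATCH to
         * suppress repeated complaints about zero checksums.
         */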
3437         if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) {
3438             if (rhead->h_chksum ||
3439                 ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) {
3440                     cmn_err(CE_DEBUG,
3441                         "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)",
3442                             INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum);
3443                     cmn_err(CE_DEBUG,
3444 "XFS: Disregard message if filesystem was created with non-DEBUG kernel");
3445                     if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3446                             cmn_err(CE_DEBUG,
3447                                 "XFS: LogR this is a LogV2 filesystem");
3448                     }
3449                     log->l_flags |= XLOG_CHKSUM_MISMATCH;
3450             }
3451         }
3452 }
3453 #else
3454 #define xlog_unpack_data_checksum(rhead, dp, log)
3455 #endif
3456
3457 STATIC void
3458 xlog_unpack_data(
3459         xlog_rec_header_t       *rhead,
3460         xfs_caddr_t             dp,
3461         xlog_t                  *log)
3462 {
3463         int                     i, j, k;
3464         xlog_in_core_2_t        *xhdr;
3465
3466         for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) &&
3467                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3468                 *(uint *)dp = *(uint *)&rhead->h_cycle_data[i];
3469                 dp += BBSIZE;
3470         }
3471
3472         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3473                 xhdr = (xlog_in_core_2_t *)rhead;
3474                 for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) {
3475                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3476                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3477                         *(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3478                         dp += BBSIZE;
3479                 }
3480         }
3481
3482         xlog_unpack_data_checksum(rhead, dp, log);
3483 }
3484
3485 STATIC int
3486 xlog_valid_rec_header(
3487         xlog_t                  *log,
3488         xlog_rec_header_t       *rhead,
3489         xfs_daddr_t             blkno)
3490 {
3491         int                     hlen;
3492
3493         if (unlikely(
3494             (INT_GET(rhead->h_magicno, ARCH_CONVERT) !=
3495                         XLOG_HEADER_MAGIC_NUM))) {
3496                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3497                                 XFS_ERRLEVEL_LOW, log->l_mp);
3498                 return XFS_ERROR(EFSCORRUPTED);
3499         }
3500         if (unlikely(
3501             (!rhead->h_version ||
3502             (INT_GET(rhead->h_version, ARCH_CONVERT) &
3503                         (~XLOG_VERSION_OKBITS)) != 0))) {
3504                 xlog_warn("XFS: %s: unrecognised log version (%d).",
3505                         __FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT));
3506                 return XFS_ERROR(EIO);
3507         }
3508
3509         /* LR body must have data or it wouldn't have been written */
3510         hlen = INT_GET(rhead->h_len, ARCH_CONVERT);
3511         if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
3512                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3513                                 XFS_ERRLEVEL_LOW, log->l_mp);
3514                 return XFS_ERROR(EFSCORRUPTED);
3515         }
3516         if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
3517                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3518                                 XFS_ERRLEVEL_LOW, log->l_mp);
3519                 return XFS_ERROR(EFSCORRUPTED);
3520         }
3521         return 0;
3522 }
3523
3524 /*
3525  * Read the log from tail to head and process the log records found.
3526  * Handle the two cases where the tail and head are in the same cycle
3527  * and where the active portion of the log wraps around the end of
3528  * the physical log separately.  The pass parameter is passed through
3529  * to the routines called to process the data and is not looked at
3530  * here.
3531  */
3532 STATIC int
3533 xlog_do_recovery_pass(
3534         xlog_t                  *log,
3535         xfs_daddr_t             head_blk,
3536         xfs_daddr_t             tail_blk,
3537         int                     pass)
3538 {
3539         xlog_rec_header_t       *rhead;
3540         xfs_daddr_t             blk_no;
3541         xfs_caddr_t             bufaddr, offset;
3542         xfs_buf_t               *hbp, *dbp;
3543         int                     error = 0, h_size;
3544         int                     bblks, split_bblks;
3545         int                     hblks, split_hblks, wrapped_hblks;
3546         xlog_recover_t          *rhash[XLOG_RHASH_SIZE];
3547
3548         ASSERT(head_blk != tail_blk);
3549
3550         /*
3551          * Read the header of the tail block and get the iclog buffer size from
3552          * h_size.  Use this to tell how many sectors make up the log header.
3553          */
3554         if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
3555                 /*
3556                  * When using variable length iclogs, read the first sector of
3557                  * iclog header and extract the header size from it.  Get a
3558                  * new hbp that is the correct size.
3559                  */
3560                 hbp = xlog_get_bp(log, 1);
3561                 if (!hbp)
3562                         return ENOMEM;
3563                 if ((error = xlog_bread(log, tail_blk, 1, hbp)))
3564                         goto bread_err1;
3565                 offset = xlog_align(log, tail_blk, 1, hbp);
3566                 rhead = (xlog_rec_header_t *)offset;
3567                 error = xlog_valid_rec_header(log, rhead, tail_blk);
3568                 if (error)
3569                         goto bread_err1;
3570                 h_size = INT_GET(rhead->h_size, ARCH_CONVERT);
3571                 if ((INT_GET(rhead->h_version, ARCH_CONVERT)
3572                                 & XLOG_VERSION_2) &&
3573                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3574                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3575                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
3576                                 hblks++;
3577                         xlog_put_bp(hbp);
3578                         hbp = xlog_get_bp(log, hblks);
3579                 } else {
3580                         hblks = 1;
3581                 }
3582         } else {
3583                 ASSERT(log->l_sectbb_log == 0);
3584                 hblks = 1;
3585                 hbp = xlog_get_bp(log, 1);
3586                 h_size = XLOG_BIG_RECORD_BSIZE;
3587         }
3588
3589         if (!hbp)
3590                 return ENOMEM;
3591         dbp = xlog_get_bp(log, BTOBB(h_size));
3592         if (!dbp) {
3593                 xlog_put_bp(hbp);
3594                 return ENOMEM;
3595         }
3596
3597         memset(rhash, 0, sizeof(rhash));
3598         if (tail_blk <= head_blk) {
3599                 for (blk_no = tail_blk; blk_no < head_blk; ) {
3600                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3601                                 goto bread_err2;
3602                         offset = xlog_align(log, blk_no, hblks, hbp);
3603                         rhead = (xlog_rec_header_t *)offset;
3604                         error = xlog_valid_rec_header(log, rhead, blk_no);
3605                         if (error)
3606                                 goto bread_err2;
3607
3608                         /* blocks in data section */
3609                         bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
3610                         error = xlog_bread(log, blk_no + hblks, bblks, dbp);
3611                         if (error)
3612                                 goto bread_err2;
3613                         offset = xlog_align(log, blk_no + hblks, bblks, dbp);
3614                         xlog_unpack_data(rhead, offset, log);
3615                         if ((error = xlog_recover_process_data(log,
3616                                                 rhash, rhead, offset, pass)))
3617                                 goto bread_err2;
3618                         blk_no += bblks + hblks;
3619                 }
3620         } else {
3621                 /*
3622                  * Perform recovery around the end of the physical log.
3623                  * When the head is not on the same cycle number as the tail,
3624                  * we can't do a sequential recovery as above.
3625                  */
3626                 blk_no = tail_blk;
3627                 while (blk_no < log->l_logBBsize) {
3628                         /*
3629                          * Check for header wrapping around physical end-of-log
3630                          */
3631                         offset = NULL;
3632                         split_hblks = 0;
3633                         wrapped_hblks = 0;
3634                         if (blk_no + hblks <= log->l_logBBsize) {
3635                                 /* Read header in one read */
3636                                 error = xlog_bread(log, blk_no, hblks, hbp);
3637                                 if (error)
3638                                         goto bread_err2;
3639                                 offset = xlog_align(log, blk_no, hblks, hbp);
3640                         } else {
3641                                 /* This LR is split across physical log end */
3642                                 if (blk_no != log->l_logBBsize) {
3643                                         /* some data before physical log end */
3644                                         ASSERT(blk_no <= INT_MAX);
3645                                         split_hblks = log->l_logBBsize - (int)blk_no;
3646                                         ASSERT(split_hblks > 0);
3647                                         if ((error = xlog_bread(log, blk_no,
3648                                                         split_hblks, hbp)))
3649                                                 goto bread_err2;
3650                                         offset = xlog_align(log, blk_no,
3651                                                         split_hblks, hbp);
3652                                 }
3653                                 /*
3654                                  * Note: this black magic still works with
3655                                  * large sector sizes (non-512) only because:
3656                                  * - we increased the buffer size originally
3657                                  *   by 1 sector giving us enough extra space
3658                                  *   for the second read;
3659                                  * - the log start is guaranteed to be sector
3660                                  *   aligned;
3661                                  * - we read the log end (LR header start)
3662                                  *   _first_, then the log start (LR header end)
3663                                  *   - order is important.
3664                                  */
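                                /*
                                 * E.g. (a sketch): with hblks == 2 in
                                 * a 100 block log and a header starting
                                 * at block 99, block 99 is read into
                                 * the front of hbp, then block 0 into
                                 * the space after it, yielding one
                                 * contiguous header image.
                                 */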
3665                                 bufaddr = XFS_BUF_PTR(hbp);
3666                                 XFS_BUF_SET_PTR(hbp,
3667                                                 bufaddr + BBTOB(split_hblks),
3668                                                 BBTOB(hblks - split_hblks));
3669                                 wrapped_hblks = hblks - split_hblks;
3670                                 error = xlog_bread(log, 0, wrapped_hblks, hbp);
3671                                 if (error)
3672                                         goto bread_err2;
3673                                 XFS_BUF_SET_PTR(hbp, bufaddr, BBTOB(hblks));
3674                                 if (!offset)
3675                                         offset = xlog_align(log, 0,
3676                                                         wrapped_hblks, hbp);
3677                         }
3678                         rhead = (xlog_rec_header_t *)offset;
3679                         error = xlog_valid_rec_header(log, rhead,
3680                                                 split_hblks ? blk_no : 0);
3681                         if (error)
3682                                 goto bread_err2;
3683
3684                         bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
3685                         blk_no += hblks;
3686
3687                         /* Read in data for log record */
3688                         if (blk_no + bblks <= log->l_logBBsize) {
3689                                 error = xlog_bread(log, blk_no, bblks, dbp);
3690                                 if (error)
3691                                         goto bread_err2;
3692                                 offset = xlog_align(log, blk_no, bblks, dbp);
3693                         } else {
3694                                 /* This log record is split across the
3695                                  * physical end of log */
3696                                 offset = NULL;
3697                                 split_bblks = 0;
3698                                 if (blk_no != log->l_logBBsize) {
3699                                         /* some data is before the physical
3700                                          * end of log */
3701                                         ASSERT(!wrapped_hblks);
3702                                         ASSERT(blk_no <= INT_MAX);
3703                                         split_bblks =
3704                                                 log->l_logBBsize - (int)blk_no;
3705                                         ASSERT(split_bblks > 0);
3706                                         if ((error = xlog_bread(log, blk_no,
3707                                                         split_bblks, dbp)))
3708                                                 goto bread_err2;
3709                                         offset = xlog_align(log, blk_no,
3710                                                         split_bblks, dbp);
3711                                 }
3712                                 /*
3713                                  * Note: this black magic still works with
3714                                  * large sector sizes (non-512) only because:
3715                                  * - we increased the buffer size originally
3716                                  *   by 1 sector giving us enough extra space
3717                                  *   for the second read;
3718                                  * - the log start is guaranteed to be sector
3719                                  *   aligned;
3720                                  * - we read the log end (LR header start)
3721                                  *   _first_, then the log start (LR header end)
3722                                  *   - order is important.
3723                                  */
3724                                 bufaddr = XFS_BUF_PTR(dbp);
3725                                 XFS_BUF_SET_PTR(dbp,
3726                                                 bufaddr + BBTOB(split_bblks),
3727                                                 BBTOB(bblks - split_bblks));
3728                                 if ((error = xlog_bread(log, wrapped_hblks,
3729                                                 bblks - split_bblks, dbp)))
3730                                         goto bread_err2;
3731                                 XFS_BUF_SET_PTR(dbp, bufaddr, h_size);
3732                                 if (!offset)
3733                                         offset = xlog_align(log, wrapped_hblks,
3734                                                 bblks - split_bblks, dbp);
3735                         }
3736                         xlog_unpack_data(rhead, offset, log);
3737                         if ((error = xlog_recover_process_data(log, rhash,
3738                                                         rhead, offset, pass)))
3739                                 goto bread_err2;
3740                         blk_no += bblks;
3741                 }
3742
3743                 ASSERT(blk_no >= log->l_logBBsize);
3744                 blk_no -= log->l_logBBsize;
3745
3746                 /* read first part of physical log */
3747                 while (blk_no < head_blk) {
3748                         if ((error = xlog_bread(log, blk_no, hblks, hbp)))
3749                                 goto bread_err2;
3750                         offset = xlog_align(log, blk_no, hblks, hbp);
3751                         rhead = (xlog_rec_header_t *)offset;
3752                         error = xlog_valid_rec_header(log, rhead, blk_no);
3753                         if (error)
3754                                 goto bread_err2;
3755                         bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT));
3756                         if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp)))
3757                                 goto bread_err2;
3758                         offset = xlog_align(log, blk_no+hblks, bblks, dbp);
3759                         xlog_unpack_data(rhead, offset, log);
3760                         if ((error = xlog_recover_process_data(log, rhash,
3761                                                         rhead, offset, pass)))
3762                                 goto bread_err2;
3763                         blk_no += bblks + hblks;
3764                 }
3765         }
3766
3767  bread_err2:
3768         xlog_put_bp(dbp);
3769  bread_err1:
3770         xlog_put_bp(hbp);
3771         return error;
3772 }
3773
3774 /*
3775  * Do the recovery of the log.  We actually do this in two phases.
3776  * The two passes are necessary in order to implement the
3777  * cancellation of records written into the log.  The first pass
3778  * determines those things which have been cancelled, and the
3779  * second pass replays log items normally except for those which
3780  * have been cancelled.  The handling of the replay and cancellations
3781  * takes place in the log item type specific routines.
3782  *
3783  * The table of items which have cancel records in the log is allocated
3784  * and freed at this level, since only here do we know when all of
3785  * the log recovery has been completed.
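 *
 * As an example of why the two passes matter: a buffer can be logged
 * and later freed and reused (e.g. as part of an inode cluster) within
 * the same log; replaying the stale buffer image over the newer
 * contents would corrupt the filesystem.  Pass 1 notes such cancelled
 * buffers in l_buf_cancel_table so that pass 2 can skip them.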
3786  */
3787 STATIC int
3788 xlog_do_log_recovery(
3789         xlog_t          *log,
3790         xfs_daddr_t     head_blk,
3791         xfs_daddr_t     tail_blk)
3792 {
3793         int             error;
3794
3795         ASSERT(head_blk != tail_blk);
3796
3797         /*
3798          * First do a pass to find all of the cancelled buf log items.
3799          * Store them in the buf_cancel_table for use in the second pass.
3800          */
3801         log->l_buf_cancel_table =
3802                 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3803                                                  sizeof(xfs_buf_cancel_t*),
3804                                                  KM_SLEEP);
3805         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3806                                       XLOG_RECOVER_PASS1);
3807         if (error != 0) {
3808                 kmem_free(log->l_buf_cancel_table,
3809                           XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
3810                 log->l_buf_cancel_table = NULL;
3811                 return error;
3812         }
3813         /*
3814          * Then do a second pass to actually recover the items in the log.
3815          * When it is complete free the table of buf cancel items.
3816          */
3817         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3818                                       XLOG_RECOVER_PASS2);
3819 #ifdef DEBUG
3820         {
3821                 int     i;
3822
3823                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3824                         ASSERT(log->l_buf_cancel_table[i] == NULL);
3825         }
3826 #endif  /* DEBUG */
3827
3828         kmem_free(log->l_buf_cancel_table,
3829                   XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
3830         log->l_buf_cancel_table = NULL;
3831
3832         return error;
3833 }
3834
3835 /*
3836  * Do the actual recovery
3837  */
3838 STATIC int
3839 xlog_do_recover(
3840         xlog_t          *log,
3841         xfs_daddr_t     head_blk,
3842         xfs_daddr_t     tail_blk)
3843 {
3844         int             error;
3845         xfs_buf_t       *bp;
3846         xfs_sb_t        *sbp;
3847
3848         /*
3849          * First replay the images in the log.
3850          */
3851         error = xlog_do_log_recovery(log, head_blk, tail_blk);
3852         if (error) {
3853                 return error;
3854         }
3855
3856         XFS_bflush(log->l_mp->m_ddev_targp);
3857
3858         /*
3859          * If IO errors happened during recovery, bail out.
3860          */
3861         if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3862                 return (EIO);
3863         }
3864
3865         /*
3866          * We now update the tail_lsn since much of the recovery has completed
3867          * and there may be space available to use.  If there were no extent
3868          * frees or iunlinks, we can free up the entire log and set the
3869          * tail_lsn to be the last_sync_lsn.  This was set in xlog_find_tail
3870          * to be the lsn of the last known good log record (LR) on disk.  If
3871          * there are extent frees or iunlinks, they will have some entries in
3872          * the AIL; so we look at the AIL to determine how to set the tail_lsn.
3873          */
3874         xlog_assign_tail_lsn(log->l_mp);
3875
3876         /*
3877          * Now that we've finished replaying all buffer and inode
3878          * updates, re-read in the superblock.
3879          */
3880         bp = xfs_getsb(log->l_mp, 0);
3881         XFS_BUF_UNDONE(bp);
3882         XFS_BUF_READ(bp);
3883         xfsbdstrat(log->l_mp, bp);
3884         if ((error = xfs_iowait(bp))) {
3885                 xfs_ioerror_alert("xlog_do_recover",
3886                                   log->l_mp, bp, XFS_BUF_ADDR(bp));
3887                 ASSERT(0);
3888                 xfs_buf_relse(bp);
3889                 return error;
3890         }
3891
3892         /* Convert superblock from on-disk format */
3893         sbp = &log->l_mp->m_sb;
3894         xfs_xlatesb(XFS_BUF_TO_SBP(bp), sbp, 1, XFS_SB_ALL_BITS);
3895         ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3896         ASSERT(XFS_SB_GOOD_VERSION(sbp));
3897         xfs_buf_relse(bp);
3898
3899         xlog_recover_check_summary(log);
3900
3901         /* Normal transactions can now occur */
3902         log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3903         return 0;
3904 }
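/*
 * A hedged sketch of the choice xlog_assign_tail_lsn() makes above
 * (simplified; not this file's code, and demo_pick_tail_lsn is a
 * hypothetical name): if the AIL still holds items -- e.g. extent
 * free intents or unlinked inode updates left over from replay --
 * the tail must not advance past the oldest of them; otherwise the
 * whole log is free and the tail moves up to the last_sync_lsn that
 * xlog_find_tail() recorded.
 */
#if 0	/* illustrative sketch only */
static xfs_lsn_t
demo_pick_tail_lsn(xfs_lsn_t oldest_ail_lsn, xfs_lsn_t last_sync_lsn,
		   int ail_empty)
{
	return ail_empty ? last_sync_lsn : oldest_ail_lsn;
}
#endif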
3905
3906 /*
3907  * Perform recovery.  Some log variables are re-initialized by xlog_find_tail.
3908  *
3909  * Return error or zero.
3910  */
3911 int
3912 xlog_recover(
3913         xlog_t          *log,
3914         int             readonly)
3915 {
3916         xfs_daddr_t     head_blk, tail_blk;
3917         int             error;
3918
3919         /* find the tail of the log */
3920         if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly)))
3921                 return error;
3922
3923         if (tail_blk != head_blk) {
3924                 /* There used to be a comment here:
3925                  *
3926          *      "disallow recovery on read-only mounts.  note -- mount
3927          *      checks for ENOSPC and turns it into an intelligent
3928          *      error message."
3929          * ...but this is no longer true.  Now, unless NORECOVERY is
3930          * specified (in which case this function is never called), we
3931          * just go ahead and recover.  We do this all under the vfs
3932          * layer, so we can get away with it unless the device itself
3933          * is read-only, in which case we fail.
3934          */
3935                 if ((error = xfs_dev_is_read_only(log->l_mp,
3936                                                 "recovery required"))) {
3937                         return error;
3938                 }
3939
3940                 cmn_err(CE_NOTE,
3941                         "Starting XFS recovery on filesystem: %s (dev: %s)",
3942                         log->l_mp->m_fsname, XFS_BUFTARG_NAME(log->l_targ));
3943
3944                 error = xlog_do_recover(log, head_blk, tail_blk);
3945                 log->l_flags |= XLOG_RECOVERY_NEEDED;
3946         }
3947         return error;
3948 }
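/*
 * For orientation, the mount path drives the two entry points in this
 * file roughly as follows (a hedged sketch, not a verbatim caller; in
 * this era the callers are xfs_log_mount() and xfs_log_mount_finish(),
 * which carry more state than shown):
 */
#if 0	/* illustrative sketch only */
	error = xlog_recover(log, readonly);	/* replay buffers/inodes */
	if (error)
		return error;
	/* ... root and realtime bitmap inodes are read in here ... */
	error = xlog_recover_finish(log, mfsi_flags);	/* EFIs, iunlinks */
#endif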
3949
3950 /*
3951  * In the first part of recovery we replay inodes and buffers and build
3952  * up the list of extent free items which need to be processed.  Here
3953  * we process the extent free items and clean up the on-disk unlinked
3954  * inode lists.  This is separated from the first part of recovery so
3955  * that the root and real-time bitmap inodes can be read in from disk in
3956  * between the two stages.  This is necessary so that we can free space
3957  * in the real-time portion of the file system.
3958  */
3959 int
3960 xlog_recover_finish(
3961         xlog_t          *log,
3962         int             mfsi_flags)
3963 {
3964         /*
3965          * Now we're ready to do the transactions needed for the
3966          * rest of recovery.  Start with completing all the extent
3967          * free intent records and then process the unlinked inode
3968          * lists.  At this point, we essentially run in normal mode
3969          * except that we're still performing recovery actions
3970          * rather than accepting new requests.
3971          */
3972         if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3973                 xlog_recover_process_efis(log);
3974                 /*
3975                  * Sync the log to get all the EFIs out of the AIL.
3976          * This isn't absolutely necessary, but it helps in
3977          * case the unlink transactions have trouble pushing
3978          * the EFIs out of the way.
3979                  */
3980                 xfs_log_force(log->l_mp, (xfs_lsn_t)0,
3981                               (XFS_LOG_FORCE | XFS_LOG_SYNC));
3982
3983         if ((mfsi_flags & XFS_MFSI_NOUNLINK) == 0) {
3984                         xlog_recover_process_iunlinks(log);
3985                 }
3986
3987                 xlog_recover_check_summary(log);
3988
3989                 cmn_err(CE_NOTE,
3990                         "Ending XFS recovery on filesystem: %s (dev: %s)",
3991                         log->l_mp->m_fsname, XFS_BUFTARG_NAME(log->l_targ));
3992                 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3993         } else {
3994                 cmn_err(CE_DEBUG,
3995                         "!Ending clean XFS mount for filesystem: %s",
3996                         log->l_mp->m_fsname);
3997         }
3998         return 0;
3999 }
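/*
 * A sketch of what "completing the extent free intent records" means
 * in xlog_recover_process_efis() above (simplified and hedged; the
 * demo_* names are hypothetical stand-ins): walk the AIL, and for
 * every EFI that never saw a matching EFD during replay, perform the
 * extent frees it describes, then retire the intent with an EFD.
 */
#if 0	/* illustrative sketch only */
struct demo_extent {
	unsigned long long	start;
	unsigned long long	len;
};

struct demo_efi {
	int			cancelled;	/* matching EFD seen in log? */
	int			nextents;
	struct demo_extent	*ext;
	struct demo_efi		*ail_next;
};

static void
demo_free_extent(struct demo_extent *ep)
{
	/* stand-in for the real transactional xfs_free_extent() call */
	(void)ep;
}

static void
demo_process_efis(struct demo_efi *ail)
{
	struct demo_efi	*efi;
	int		i;

	for (efi = ail; efi != NULL; efi = efi->ail_next) {
		if (efi->cancelled)
			continue;	/* already completed before the crash */
		for (i = 0; i < efi->nextents; i++)
			demo_free_extent(&efi->ext[i]);
		/* the real code logs an EFD here to retire the intent */
	}
}
#endif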
4000
4001
4002 #if defined(DEBUG)
4003 /*
4004  * Read all of the agf and agi counters and check that they
4005  * are consistent with the superblock counters.
4006  */
4007 void
4008 xlog_recover_check_summary(
4009         xlog_t          *log)
4010 {
4011         xfs_mount_t     *mp;
4012         xfs_agf_t       *agfp;
4013         xfs_agi_t       *agip;
4014         xfs_buf_t       *agfbp;
4015         xfs_buf_t       *agibp;
4016         xfs_daddr_t     agfdaddr;
4017         xfs_daddr_t     agidaddr;
4018         xfs_buf_t       *sbbp;
4019 #ifdef XFS_LOUD_RECOVERY
4020         xfs_sb_t        *sbp;
4021 #endif
4022         xfs_agnumber_t  agno;
4023         __uint64_t      freeblks;
4024         __uint64_t      itotal;
4025         __uint64_t      ifree;
4026
4027         mp = log->l_mp;
4028
4029         freeblks = 0LL;
4030         itotal = 0LL;
4031         ifree = 0LL;
4032         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4033                 agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp));
4034                 agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr,
4035                                 XFS_FSS_TO_BB(mp, 1), 0);
4036                 if (XFS_BUF_ISERROR(agfbp)) {
4037                         xfs_ioerror_alert("xlog_recover_check_summary(agf)",
4038                                                 mp, agfbp, agfdaddr);
4039                 }
4040                 agfp = XFS_BUF_TO_AGF(agfbp);
4041                 ASSERT(XFS_AGF_MAGIC ==
4042                         INT_GET(agfp->agf_magicnum, ARCH_CONVERT));
4043                 ASSERT(XFS_AGF_GOOD_VERSION(
4044                         INT_GET(agfp->agf_versionnum, ARCH_CONVERT)));
4045                 ASSERT(INT_GET(agfp->agf_seqno, ARCH_CONVERT) == agno);
4046
4047                 freeblks += INT_GET(agfp->agf_freeblks, ARCH_CONVERT) +
4048                             INT_GET(agfp->agf_flcount, ARCH_CONVERT);
4049                 xfs_buf_relse(agfbp);
4050
4051                 agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
4052                 agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr,
4053                                 XFS_FSS_TO_BB(mp, 1), 0);
4054                 if (XFS_BUF_ISERROR(agibp)) {
4055                         xfs_ioerror_alert("xlog_recover_check_summary(agi)",
4056                                           mp, agibp, agidaddr);
4057                 }
4058                 agip = XFS_BUF_TO_AGI(agibp);
4059                 ASSERT(XFS_AGI_MAGIC ==
4060                         INT_GET(agip->agi_magicnum, ARCH_CONVERT));
4061                 ASSERT(XFS_AGI_GOOD_VERSION(
4062                         INT_GET(agip->agi_versionnum, ARCH_CONVERT)));
4063                 ASSERT(INT_GET(agip->agi_seqno, ARCH_CONVERT) == agno);
4064
4065                 itotal += INT_GET(agip->agi_count, ARCH_CONVERT);
4066                 ifree += INT_GET(agip->agi_freecount, ARCH_CONVERT);
4067                 xfs_buf_relse(agibp);
4068         }
4069
4070         sbbp = xfs_getsb(mp, 0);
4071 #ifdef XFS_LOUD_RECOVERY
4072         sbp = &mp->m_sb;
4073         xfs_xlatesb(XFS_BUF_TO_SBP(sbbp), sbp, 1, XFS_SB_ALL_BITS);
4074         cmn_err(CE_NOTE,
4075                 "xlog_recover_check_summary: sb_icount %Lu itotal %Lu",
4076                 sbp->sb_icount, itotal);
4077         cmn_err(CE_NOTE,
4078                 "xlog_recover_check_summary: sb_ifree %Lu ifree %Lu",
4079                 sbp->sb_ifree, ifree);
4080         cmn_err(CE_NOTE,
4081                 "xlog_recover_check_summary: sb_fdblocks %Lu freeblks %Lu",
4082                 sbp->sb_fdblocks, freeblks);
4083 #if 0
4084         /*
4085          * This is turned off until I account for the allocation
4086          * btree blocks which live in free space.
4087          */
4088         ASSERT(sbp->sb_icount == itotal);
4089         ASSERT(sbp->sb_ifree == ifree);
4090         ASSERT(sbp->sb_fdblocks == freeblks);
4091 #endif
4092 #endif
4093         xfs_buf_relse(sbbp);
4094 }
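/*
 * Stated compactly, the invariant the DEBUG walk above would check if
 * the #if 0 ASSERTs were enabled (they stay off until allocation btree
 * blocks living in free space are accounted for):
 *
 *   sum over all AGs of (agf_freeblks + agf_flcount) == sb_fdblocks
 *   sum over all AGs of agi_count                    == sb_icount
 *   sum over all AGs of agi_freecount                == sb_ifree
 */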
4095 #endif /* DEBUG */