xfs: clean up xlog_align
[linux-2.6.git] / fs / xfs / xfs_log_recover.c
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_error.h"
31 #include "xfs_bmap_btree.h"
32 #include "xfs_alloc_btree.h"
33 #include "xfs_ialloc_btree.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_dinode.h"
37 #include "xfs_inode.h"
38 #include "xfs_inode_item.h"
39 #include "xfs_alloc.h"
40 #include "xfs_ialloc.h"
41 #include "xfs_log_priv.h"
42 #include "xfs_buf_item.h"
43 #include "xfs_log_recover.h"
44 #include "xfs_extfree_item.h"
45 #include "xfs_trans_priv.h"
46 #include "xfs_quota.h"
47 #include "xfs_rw.h"
48 #include "xfs_utils.h"
49 #include "xfs_trace.h"
50
51 STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
52 STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
53 #if defined(DEBUG)
54 STATIC void     xlog_recover_check_summary(xlog_t *);
55 #else
56 #define xlog_recover_check_summary(log)
57 #endif
58
59 /*
60  * Sector aligned buffer routines for buffer create/read/write/access
61  */
62
63 /*
64  * Verify the given count of basic blocks is valid number of blocks
65  * to specify for an operation involving the given XFS log buffer.
66  * Returns nonzero if the count is valid, 0 otherwise.
67  */
68
69 static inline int
70 xlog_buf_bbcount_valid(
71         xlog_t          *log,
72         int             bbcount)
73 {
74         return bbcount > 0 && bbcount <= log->l_logBBsize;
75 }
76
77 /*
78  * Allocate a buffer to hold log data.  The buffer needs to be able
79  * to map to a range of nbblks basic blocks at any valid (basic
80  * block) offset within the log.
81  */
82 STATIC xfs_buf_t *
83 xlog_get_bp(
84         xlog_t          *log,
85         int             nbblks)
86 {
87         if (!xlog_buf_bbcount_valid(log, nbblks)) {
88                 xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
89                         nbblks);
90                 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
91                 return NULL;
92         }
93
94         /*
95          * We do log I/O in units of log sectors (a power-of-2
96          * multiple of the basic block size), so we round up the
97          * requested size to acommodate the basic blocks required
98          * for complete log sectors.
99          *
100          * In addition, the buffer may be used for a non-sector-
101          * aligned block offset, in which case an I/O of the
102          * requested size could extend beyond the end of the
103          * buffer.  If the requested size is only 1 basic block it
104          * will never straddle a sector boundary, so this won't be
105          * an issue.  Nor will this be a problem if the log I/O is
106          * done in basic blocks (sector size 1).  But otherwise we
107          * extend the buffer by one extra log sector to ensure
108          * there's space to accomodate this possiblility.
109          */
110         if (nbblks > 1 && log->l_sectBBsize > 1)
111                 nbblks += log->l_sectBBsize;
112         nbblks = round_up(nbblks, log->l_sectBBsize);
113
114         return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
115 }
116
117 STATIC void
118 xlog_put_bp(
119         xfs_buf_t       *bp)
120 {
121         xfs_buf_free(bp);
122 }
123
124 /*
125  * Return the address of the start of the given block number's data
126  * in a log buffer.  The buffer covers a log sector-aligned region.
127  */
128 STATIC xfs_caddr_t
129 xlog_align(
130         xlog_t          *log,
131         xfs_daddr_t     blk_no,
132         int             nbblks,
133         xfs_buf_t       *bp)
134 {
135         xfs_daddr_t     offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
136
137         ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp));
138         return XFS_BUF_PTR(bp) + BBTOB(offset);
139 }
140
141
142 /*
143  * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
144  */
145 STATIC int
146 xlog_bread_noalign(
147         xlog_t          *log,
148         xfs_daddr_t     blk_no,
149         int             nbblks,
150         xfs_buf_t       *bp)
151 {
152         int             error;
153
154         if (!xlog_buf_bbcount_valid(log, nbblks)) {
155                 xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
156                         nbblks);
157                 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
158                 return EFSCORRUPTED;
159         }
160
161         blk_no = round_down(blk_no, log->l_sectBBsize);
162         nbblks = round_up(nbblks, log->l_sectBBsize);
163
164         ASSERT(nbblks > 0);
165         ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
166
167         XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
168         XFS_BUF_READ(bp);
169         XFS_BUF_BUSY(bp);
170         XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
171         XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
172
173         xfsbdstrat(log->l_mp, bp);
174         error = xfs_iowait(bp);
175         if (error)
176                 xfs_ioerror_alert("xlog_bread", log->l_mp,
177                                   bp, XFS_BUF_ADDR(bp));
178         return error;
179 }
180
181 STATIC int
182 xlog_bread(
183         xlog_t          *log,
184         xfs_daddr_t     blk_no,
185         int             nbblks,
186         xfs_buf_t       *bp,
187         xfs_caddr_t     *offset)
188 {
189         int             error;
190
191         error = xlog_bread_noalign(log, blk_no, nbblks, bp);
192         if (error)
193                 return error;
194
195         *offset = xlog_align(log, blk_no, nbblks, bp);
196         return 0;
197 }
198
199 /*
200  * Write out the buffer at the given block for the given number of blocks.
201  * The buffer is kept locked across the write and is returned locked.
202  * This can only be used for synchronous log writes.
203  */
204 STATIC int
205 xlog_bwrite(
206         xlog_t          *log,
207         xfs_daddr_t     blk_no,
208         int             nbblks,
209         xfs_buf_t       *bp)
210 {
211         int             error;
212
213         if (!xlog_buf_bbcount_valid(log, nbblks)) {
214                 xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
215                         nbblks);
216                 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
217                 return EFSCORRUPTED;
218         }
219
220         blk_no = round_down(blk_no, log->l_sectBBsize);
221         nbblks = round_up(nbblks, log->l_sectBBsize);
222
223         ASSERT(nbblks > 0);
224         ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
225
226         XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
227         XFS_BUF_ZEROFLAGS(bp);
228         XFS_BUF_BUSY(bp);
229         XFS_BUF_HOLD(bp);
230         XFS_BUF_PSEMA(bp, PRIBIO);
231         XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
232         XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
233
234         if ((error = xfs_bwrite(log->l_mp, bp)))
235                 xfs_ioerror_alert("xlog_bwrite", log->l_mp,
236                                   bp, XFS_BUF_ADDR(bp));
237         return error;
238 }
239
240 #ifdef DEBUG
241 /*
242  * dump debug superblock and log record information
243  */
244 STATIC void
245 xlog_header_check_dump(
246         xfs_mount_t             *mp,
247         xlog_rec_header_t       *head)
248 {
249         cmn_err(CE_DEBUG, "%s:  SB : uuid = %pU, fmt = %d\n",
250                 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
251         cmn_err(CE_DEBUG, "    log : uuid = %pU, fmt = %d\n",
252                 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
253 }
254 #else
255 #define xlog_header_check_dump(mp, head)
256 #endif
257
258 /*
259  * check log record header for recovery
260  */
261 STATIC int
262 xlog_header_check_recover(
263         xfs_mount_t             *mp,
264         xlog_rec_header_t       *head)
265 {
266         ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
267
268         /*
269          * IRIX doesn't write the h_fmt field and leaves it zeroed
270          * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
271          * a dirty log created in IRIX.
272          */
273         if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
274                 xlog_warn(
275         "XFS: dirty log written in incompatible format - can't recover");
276                 xlog_header_check_dump(mp, head);
277                 XFS_ERROR_REPORT("xlog_header_check_recover(1)",
278                                  XFS_ERRLEVEL_HIGH, mp);
279                 return XFS_ERROR(EFSCORRUPTED);
280         } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
281                 xlog_warn(
282         "XFS: dirty log entry has mismatched uuid - can't recover");
283                 xlog_header_check_dump(mp, head);
284                 XFS_ERROR_REPORT("xlog_header_check_recover(2)",
285                                  XFS_ERRLEVEL_HIGH, mp);
286                 return XFS_ERROR(EFSCORRUPTED);
287         }
288         return 0;
289 }
290
291 /*
292  * read the head block of the log and check the header
293  */
294 STATIC int
295 xlog_header_check_mount(
296         xfs_mount_t             *mp,
297         xlog_rec_header_t       *head)
298 {
299         ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
300
301         if (uuid_is_nil(&head->h_fs_uuid)) {
302                 /*
303                  * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
304                  * h_fs_uuid is nil, we assume this log was last mounted
305                  * by IRIX and continue.
306                  */
307                 xlog_warn("XFS: nil uuid in log - IRIX style log");
308         } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
309                 xlog_warn("XFS: log has mismatched uuid - can't recover");
310                 xlog_header_check_dump(mp, head);
311                 XFS_ERROR_REPORT("xlog_header_check_mount",
312                                  XFS_ERRLEVEL_HIGH, mp);
313                 return XFS_ERROR(EFSCORRUPTED);
314         }
315         return 0;
316 }
317
318 STATIC void
319 xlog_recover_iodone(
320         struct xfs_buf  *bp)
321 {
322         if (XFS_BUF_GETERROR(bp)) {
323                 /*
324                  * We're not going to bother about retrying
325                  * this during recovery. One strike!
326                  */
327                 xfs_ioerror_alert("xlog_recover_iodone",
328                                   bp->b_mount, bp, XFS_BUF_ADDR(bp));
329                 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
330         }
331         bp->b_mount = NULL;
332         XFS_BUF_CLR_IODONE_FUNC(bp);
333         xfs_biodone(bp);
334 }
335
336 /*
337  * This routine finds (to an approximation) the first block in the physical
338  * log which contains the given cycle.  It uses a binary search algorithm.
339  * Note that the algorithm can not be perfect because the disk will not
340  * necessarily be perfect.
341  */
342 STATIC int
343 xlog_find_cycle_start(
344         xlog_t          *log,
345         xfs_buf_t       *bp,
346         xfs_daddr_t     first_blk,
347         xfs_daddr_t     *last_blk,
348         uint            cycle)
349 {
350         xfs_caddr_t     offset;
351         xfs_daddr_t     mid_blk;
352         xfs_daddr_t     end_blk;
353         uint            mid_cycle;
354         int             error;
355
356         end_blk = *last_blk;
357         mid_blk = BLK_AVG(first_blk, end_blk);
358         while (mid_blk != first_blk && mid_blk != end_blk) {
359                 error = xlog_bread(log, mid_blk, 1, bp, &offset);
360                 if (error)
361                         return error;
362                 mid_cycle = xlog_get_cycle(offset);
363                 if (mid_cycle == cycle)
364                         end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
365                 else
366                         first_blk = mid_blk; /* first_half_cycle == mid_cycle */
367                 mid_blk = BLK_AVG(first_blk, end_blk);
368         }
369         ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
370                (mid_blk == end_blk && mid_blk-1 == first_blk));
371
372         *last_blk = end_blk;
373
374         return 0;
375 }
376
377 /*
378  * Check that a range of blocks does not contain stop_on_cycle_no.
379  * Fill in *new_blk with the block offset where such a block is
380  * found, or with -1 (an invalid block number) if there is no such
381  * block in the range.  The scan needs to occur from front to back
382  * and the pointer into the region must be updated since a later
383  * routine will need to perform another test.
384  */
385 STATIC int
386 xlog_find_verify_cycle(
387         xlog_t          *log,
388         xfs_daddr_t     start_blk,
389         int             nbblks,
390         uint            stop_on_cycle_no,
391         xfs_daddr_t     *new_blk)
392 {
393         xfs_daddr_t     i, j;
394         uint            cycle;
395         xfs_buf_t       *bp;
396         xfs_daddr_t     bufblks;
397         xfs_caddr_t     buf = NULL;
398         int             error = 0;
399
400         /*
401          * Greedily allocate a buffer big enough to handle the full
402          * range of basic blocks we'll be examining.  If that fails,
403          * try a smaller size.  We need to be able to read at least
404          * a log sector, or we're out of luck.
405          */
406         bufblks = 1 << ffs(nbblks);
407         while (!(bp = xlog_get_bp(log, bufblks))) {
408                 bufblks >>= 1;
409                 if (bufblks < log->l_sectBBsize)
410                         return ENOMEM;
411         }
412
413         for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
414                 int     bcount;
415
416                 bcount = min(bufblks, (start_blk + nbblks - i));
417
418                 error = xlog_bread(log, i, bcount, bp, &buf);
419                 if (error)
420                         goto out;
421
422                 for (j = 0; j < bcount; j++) {
423                         cycle = xlog_get_cycle(buf);
424                         if (cycle == stop_on_cycle_no) {
425                                 *new_blk = i+j;
426                                 goto out;
427                         }
428
429                         buf += BBSIZE;
430                 }
431         }
432
433         *new_blk = -1;
434
435 out:
436         xlog_put_bp(bp);
437         return error;
438 }
439
440 /*
441  * Potentially backup over partial log record write.
442  *
443  * In the typical case, last_blk is the number of the block directly after
444  * a good log record.  Therefore, we subtract one to get the block number
445  * of the last block in the given buffer.  extra_bblks contains the number
446  * of blocks we would have read on a previous read.  This happens when the
447  * last log record is split over the end of the physical log.
448  *
449  * extra_bblks is the number of blocks potentially verified on a previous
450  * call to this routine.
451  */
452 STATIC int
453 xlog_find_verify_log_record(
454         xlog_t                  *log,
455         xfs_daddr_t             start_blk,
456         xfs_daddr_t             *last_blk,
457         int                     extra_bblks)
458 {
459         xfs_daddr_t             i;
460         xfs_buf_t               *bp;
461         xfs_caddr_t             offset = NULL;
462         xlog_rec_header_t       *head = NULL;
463         int                     error = 0;
464         int                     smallmem = 0;
465         int                     num_blks = *last_blk - start_blk;
466         int                     xhdrs;
467
468         ASSERT(start_blk != 0 || *last_blk != start_blk);
469
470         if (!(bp = xlog_get_bp(log, num_blks))) {
471                 if (!(bp = xlog_get_bp(log, 1)))
472                         return ENOMEM;
473                 smallmem = 1;
474         } else {
475                 error = xlog_bread(log, start_blk, num_blks, bp, &offset);
476                 if (error)
477                         goto out;
478                 offset += ((num_blks - 1) << BBSHIFT);
479         }
480
481         for (i = (*last_blk) - 1; i >= 0; i--) {
482                 if (i < start_blk) {
483                         /* valid log record not found */
484                         xlog_warn(
485                 "XFS: Log inconsistent (didn't find previous header)");
486                         ASSERT(0);
487                         error = XFS_ERROR(EIO);
488                         goto out;
489                 }
490
491                 if (smallmem) {
492                         error = xlog_bread(log, i, 1, bp, &offset);
493                         if (error)
494                                 goto out;
495                 }
496
497                 head = (xlog_rec_header_t *)offset;
498
499                 if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
500                         break;
501
502                 if (!smallmem)
503                         offset -= BBSIZE;
504         }
505
506         /*
507          * We hit the beginning of the physical log & still no header.  Return
508          * to caller.  If caller can handle a return of -1, then this routine
509          * will be called again for the end of the physical log.
510          */
511         if (i == -1) {
512                 error = -1;
513                 goto out;
514         }
515
516         /*
517          * We have the final block of the good log (the first block
518          * of the log record _before_ the head. So we check the uuid.
519          */
520         if ((error = xlog_header_check_mount(log->l_mp, head)))
521                 goto out;
522
523         /*
524          * We may have found a log record header before we expected one.
525          * last_blk will be the 1st block # with a given cycle #.  We may end
526          * up reading an entire log record.  In this case, we don't want to
527          * reset last_blk.  Only when last_blk points in the middle of a log
528          * record do we update last_blk.
529          */
530         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
531                 uint    h_size = be32_to_cpu(head->h_size);
532
533                 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
534                 if (h_size % XLOG_HEADER_CYCLE_SIZE)
535                         xhdrs++;
536         } else {
537                 xhdrs = 1;
538         }
539
540         if (*last_blk - i + extra_bblks !=
541             BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
542                 *last_blk = i;
543
544 out:
545         xlog_put_bp(bp);
546         return error;
547 }
548
549 /*
550  * Head is defined to be the point of the log where the next log write
551  * write could go.  This means that incomplete LR writes at the end are
552  * eliminated when calculating the head.  We aren't guaranteed that previous
553  * LR have complete transactions.  We only know that a cycle number of
554  * current cycle number -1 won't be present in the log if we start writing
555  * from our current block number.
556  *
557  * last_blk contains the block number of the first block with a given
558  * cycle number.
559  *
560  * Return: zero if normal, non-zero if error.
561  */
562 STATIC int
563 xlog_find_head(
564         xlog_t          *log,
565         xfs_daddr_t     *return_head_blk)
566 {
567         xfs_buf_t       *bp;
568         xfs_caddr_t     offset;
569         xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
570         int             num_scan_bblks;
571         uint            first_half_cycle, last_half_cycle;
572         uint            stop_on_cycle;
573         int             error, log_bbnum = log->l_logBBsize;
574
575         /* Is the end of the log device zeroed? */
576         if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
577                 *return_head_blk = first_blk;
578
579                 /* Is the whole lot zeroed? */
580                 if (!first_blk) {
581                         /* Linux XFS shouldn't generate totally zeroed logs -
582                          * mkfs etc write a dummy unmount record to a fresh
583                          * log so we can store the uuid in there
584                          */
585                         xlog_warn("XFS: totally zeroed log");
586                 }
587
588                 return 0;
589         } else if (error) {
590                 xlog_warn("XFS: empty log check failed");
591                 return error;
592         }
593
594         first_blk = 0;                  /* get cycle # of 1st block */
595         bp = xlog_get_bp(log, 1);
596         if (!bp)
597                 return ENOMEM;
598
599         error = xlog_bread(log, 0, 1, bp, &offset);
600         if (error)
601                 goto bp_err;
602
603         first_half_cycle = xlog_get_cycle(offset);
604
605         last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
606         error = xlog_bread(log, last_blk, 1, bp, &offset);
607         if (error)
608                 goto bp_err;
609
610         last_half_cycle = xlog_get_cycle(offset);
611         ASSERT(last_half_cycle != 0);
612
613         /*
614          * If the 1st half cycle number is equal to the last half cycle number,
615          * then the entire log is stamped with the same cycle number.  In this
616          * case, head_blk can't be set to zero (which makes sense).  The below
617          * math doesn't work out properly with head_blk equal to zero.  Instead,
618          * we set it to log_bbnum which is an invalid block number, but this
619          * value makes the math correct.  If head_blk doesn't changed through
620          * all the tests below, *head_blk is set to zero at the very end rather
621          * than log_bbnum.  In a sense, log_bbnum and zero are the same block
622          * in a circular file.
623          */
624         if (first_half_cycle == last_half_cycle) {
625                 /*
626                  * In this case we believe that the entire log should have
627                  * cycle number last_half_cycle.  We need to scan backwards
628                  * from the end verifying that there are no holes still
629                  * containing last_half_cycle - 1.  If we find such a hole,
630                  * then the start of that hole will be the new head.  The
631                  * simple case looks like
632                  *        x | x ... | x - 1 | x
633                  * Another case that fits this picture would be
634                  *        x | x + 1 | x ... | x
635                  * In this case the head really is somewhere at the end of the
636                  * log, as one of the latest writes at the beginning was
637                  * incomplete.
638                  * One more case is
639                  *        x | x + 1 | x ... | x - 1 | x
640                  * This is really the combination of the above two cases, and
641                  * the head has to end up at the start of the x-1 hole at the
642                  * end of the log.
643                  *
644                  * In the 256k log case, we will read from the beginning to the
645                  * end of the log and search for cycle numbers equal to x-1.
646                  * We don't worry about the x+1 blocks that we encounter,
647                  * because we know that they cannot be the head since the log
648                  * started with x.
649                  */
650                 head_blk = log_bbnum;
651                 stop_on_cycle = last_half_cycle - 1;
652         } else {
653                 /*
654                  * In this case we want to find the first block with cycle
655                  * number matching last_half_cycle.  We expect the log to be
656                  * some variation on
657                  *        x + 1 ... | x ... | x
658                  * The first block with cycle number x (last_half_cycle) will
659                  * be where the new head belongs.  First we do a binary search
660                  * for the first occurrence of last_half_cycle.  The binary
661                  * search may not be totally accurate, so then we scan back
662                  * from there looking for occurrences of last_half_cycle before
663                  * us.  If that backwards scan wraps around the beginning of
664                  * the log, then we look for occurrences of last_half_cycle - 1
665                  * at the end of the log.  The cases we're looking for look
666                  * like
667                  *                               v binary search stopped here
668                  *        x + 1 ... | x | x + 1 | x ... | x
669                  *                   ^ but we want to locate this spot
670                  * or
671                  *        <---------> less than scan distance
672                  *        x + 1 ... | x ... | x - 1 | x
673                  *                           ^ we want to locate this spot
674                  */
675                 stop_on_cycle = last_half_cycle;
676                 if ((error = xlog_find_cycle_start(log, bp, first_blk,
677                                                 &head_blk, last_half_cycle)))
678                         goto bp_err;
679         }
680
681         /*
682          * Now validate the answer.  Scan back some number of maximum possible
683          * blocks and make sure each one has the expected cycle number.  The
684          * maximum is determined by the total possible amount of buffering
685          * in the in-core log.  The following number can be made tighter if
686          * we actually look at the block size of the filesystem.
687          */
688         num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
689         if (head_blk >= num_scan_bblks) {
690                 /*
691                  * We are guaranteed that the entire check can be performed
692                  * in one buffer.
693                  */
694                 start_blk = head_blk - num_scan_bblks;
695                 if ((error = xlog_find_verify_cycle(log,
696                                                 start_blk, num_scan_bblks,
697                                                 stop_on_cycle, &new_blk)))
698                         goto bp_err;
699                 if (new_blk != -1)
700                         head_blk = new_blk;
701         } else {                /* need to read 2 parts of log */
702                 /*
703                  * We are going to scan backwards in the log in two parts.
704                  * First we scan the physical end of the log.  In this part
705                  * of the log, we are looking for blocks with cycle number
706                  * last_half_cycle - 1.
707                  * If we find one, then we know that the log starts there, as
708                  * we've found a hole that didn't get written in going around
709                  * the end of the physical log.  The simple case for this is
710                  *        x + 1 ... | x ... | x - 1 | x
711                  *        <---------> less than scan distance
712                  * If all of the blocks at the end of the log have cycle number
713                  * last_half_cycle, then we check the blocks at the start of
714                  * the log looking for occurrences of last_half_cycle.  If we
715                  * find one, then our current estimate for the location of the
716                  * first occurrence of last_half_cycle is wrong and we move
717                  * back to the hole we've found.  This case looks like
718                  *        x + 1 ... | x | x + 1 | x ...
719                  *                               ^ binary search stopped here
720                  * Another case we need to handle that only occurs in 256k
721                  * logs is
722                  *        x + 1 ... | x ... | x+1 | x ...
723                  *                   ^ binary search stops here
724                  * In a 256k log, the scan at the end of the log will see the
725                  * x + 1 blocks.  We need to skip past those since that is
726                  * certainly not the head of the log.  By searching for
727                  * last_half_cycle-1 we accomplish that.
728                  */
729                 ASSERT(head_blk <= INT_MAX &&
730                         (xfs_daddr_t) num_scan_bblks >= head_blk);
731                 start_blk = log_bbnum - (num_scan_bblks - head_blk);
732                 if ((error = xlog_find_verify_cycle(log, start_blk,
733                                         num_scan_bblks - (int)head_blk,
734                                         (stop_on_cycle - 1), &new_blk)))
735                         goto bp_err;
736                 if (new_blk != -1) {
737                         head_blk = new_blk;
738                         goto validate_head;
739                 }
740
741                 /*
742                  * Scan beginning of log now.  The last part of the physical
743                  * log is good.  This scan needs to verify that it doesn't find
744                  * the last_half_cycle.
745                  */
746                 start_blk = 0;
747                 ASSERT(head_blk <= INT_MAX);
748                 if ((error = xlog_find_verify_cycle(log,
749                                         start_blk, (int)head_blk,
750                                         stop_on_cycle, &new_blk)))
751                         goto bp_err;
752                 if (new_blk != -1)
753                         head_blk = new_blk;
754         }
755
756 validate_head:
757         /*
758          * Now we need to make sure head_blk is not pointing to a block in
759          * the middle of a log record.
760          */
761         num_scan_bblks = XLOG_REC_SHIFT(log);
762         if (head_blk >= num_scan_bblks) {
763                 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
764
765                 /* start ptr at last block ptr before head_blk */
766                 if ((error = xlog_find_verify_log_record(log, start_blk,
767                                                         &head_blk, 0)) == -1) {
768                         error = XFS_ERROR(EIO);
769                         goto bp_err;
770                 } else if (error)
771                         goto bp_err;
772         } else {
773                 start_blk = 0;
774                 ASSERT(head_blk <= INT_MAX);
775                 if ((error = xlog_find_verify_log_record(log, start_blk,
776                                                         &head_blk, 0)) == -1) {
777                         /* We hit the beginning of the log during our search */
778                         start_blk = log_bbnum - (num_scan_bblks - head_blk);
779                         new_blk = log_bbnum;
780                         ASSERT(start_blk <= INT_MAX &&
781                                 (xfs_daddr_t) log_bbnum-start_blk >= 0);
782                         ASSERT(head_blk <= INT_MAX);
783                         if ((error = xlog_find_verify_log_record(log,
784                                                         start_blk, &new_blk,
785                                                         (int)head_blk)) == -1) {
786                                 error = XFS_ERROR(EIO);
787                                 goto bp_err;
788                         } else if (error)
789                                 goto bp_err;
790                         if (new_blk != log_bbnum)
791                                 head_blk = new_blk;
792                 } else if (error)
793                         goto bp_err;
794         }
795
796         xlog_put_bp(bp);
797         if (head_blk == log_bbnum)
798                 *return_head_blk = 0;
799         else
800                 *return_head_blk = head_blk;
801         /*
802          * When returning here, we have a good block number.  Bad block
803          * means that during a previous crash, we didn't have a clean break
804          * from cycle number N to cycle number N-1.  In this case, we need
805          * to find the first block with cycle number N-1.
806          */
807         return 0;
808
809  bp_err:
810         xlog_put_bp(bp);
811
812         if (error)
813             xlog_warn("XFS: failed to find log head");
814         return error;
815 }
816
817 /*
818  * Find the sync block number or the tail of the log.
819  *
820  * This will be the block number of the last record to have its
821  * associated buffers synced to disk.  Every log record header has
822  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
823  * to get a sync block number.  The only concern is to figure out which
824  * log record header to believe.
825  *
826  * The following algorithm uses the log record header with the largest
827  * lsn.  The entire log record does not need to be valid.  We only care
828  * that the header is valid.
829  *
830  * We could speed up search by using current head_blk buffer, but it is not
831  * available.
832  */
833 STATIC int
834 xlog_find_tail(
835         xlog_t                  *log,
836         xfs_daddr_t             *head_blk,
837         xfs_daddr_t             *tail_blk)
838 {
839         xlog_rec_header_t       *rhead;
840         xlog_op_header_t        *op_head;
841         xfs_caddr_t             offset = NULL;
842         xfs_buf_t               *bp;
843         int                     error, i, found;
844         xfs_daddr_t             umount_data_blk;
845         xfs_daddr_t             after_umount_blk;
846         xfs_lsn_t               tail_lsn;
847         int                     hblks;
848
849         found = 0;
850
851         /*
852          * Find previous log record
853          */
854         if ((error = xlog_find_head(log, head_blk)))
855                 return error;
856
857         bp = xlog_get_bp(log, 1);
858         if (!bp)
859                 return ENOMEM;
860         if (*head_blk == 0) {                           /* special case */
861                 error = xlog_bread(log, 0, 1, bp, &offset);
862                 if (error)
863                         goto done;
864
865                 if (xlog_get_cycle(offset) == 0) {
866                         *tail_blk = 0;
867                         /* leave all other log inited values alone */
868                         goto done;
869                 }
870         }
871
872         /*
873          * Search backwards looking for log record header block
874          */
875         ASSERT(*head_blk < INT_MAX);
876         for (i = (int)(*head_blk) - 1; i >= 0; i--) {
877                 error = xlog_bread(log, i, 1, bp, &offset);
878                 if (error)
879                         goto done;
880
881                 if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
882                         found = 1;
883                         break;
884                 }
885         }
886         /*
887          * If we haven't found the log record header block, start looking
888          * again from the end of the physical log.  XXXmiken: There should be
889          * a check here to make sure we didn't search more than N blocks in
890          * the previous code.
891          */
892         if (!found) {
893                 for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
894                         error = xlog_bread(log, i, 1, bp, &offset);
895                         if (error)
896                                 goto done;
897
898                         if (XLOG_HEADER_MAGIC_NUM ==
899                             be32_to_cpu(*(__be32 *)offset)) {
900                                 found = 2;
901                                 break;
902                         }
903                 }
904         }
905         if (!found) {
906                 xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
907                 ASSERT(0);
908                 return XFS_ERROR(EIO);
909         }
910
911         /* find blk_no of tail of log */
912         rhead = (xlog_rec_header_t *)offset;
913         *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
914
915         /*
916          * Reset log values according to the state of the log when we
917          * crashed.  In the case where head_blk == 0, we bump curr_cycle
918          * one because the next write starts a new cycle rather than
919          * continuing the cycle of the last good log record.  At this
920          * point we have guaranteed that all partial log records have been
921          * accounted for.  Therefore, we know that the last good log record
922          * written was complete and ended exactly on the end boundary
923          * of the physical log.
924          */
925         log->l_prev_block = i;
926         log->l_curr_block = (int)*head_blk;
927         log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
928         if (found == 2)
929                 log->l_curr_cycle++;
930         log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
931         log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
932         log->l_grant_reserve_cycle = log->l_curr_cycle;
933         log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
934         log->l_grant_write_cycle = log->l_curr_cycle;
935         log->l_grant_write_bytes = BBTOB(log->l_curr_block);
936
937         /*
938          * Look for unmount record.  If we find it, then we know there
939          * was a clean unmount.  Since 'i' could be the last block in
940          * the physical log, we convert to a log block before comparing
941          * to the head_blk.
942          *
943          * Save the current tail lsn to use to pass to
944          * xlog_clear_stale_blocks() below.  We won't want to clear the
945          * unmount record if there is one, so we pass the lsn of the
946          * unmount record rather than the block after it.
947          */
948         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
949                 int     h_size = be32_to_cpu(rhead->h_size);
950                 int     h_version = be32_to_cpu(rhead->h_version);
951
952                 if ((h_version & XLOG_VERSION_2) &&
953                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
954                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
955                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
956                                 hblks++;
957                 } else {
958                         hblks = 1;
959                 }
960         } else {
961                 hblks = 1;
962         }
963         after_umount_blk = (i + hblks + (int)
964                 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
965         tail_lsn = log->l_tail_lsn;
966         if (*head_blk == after_umount_blk &&
967             be32_to_cpu(rhead->h_num_logops) == 1) {
968                 umount_data_blk = (i + hblks) % log->l_logBBsize;
969                 error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
970                 if (error)
971                         goto done;
972
973                 op_head = (xlog_op_header_t *)offset;
974                 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
975                         /*
976                          * Set tail and last sync so that newly written
977                          * log records will point recovery to after the
978                          * current unmount record.
979                          */
980                         log->l_tail_lsn =
981                                 xlog_assign_lsn(log->l_curr_cycle,
982                                                 after_umount_blk);
983                         log->l_last_sync_lsn =
984                                 xlog_assign_lsn(log->l_curr_cycle,
985                                                 after_umount_blk);
986                         *tail_blk = after_umount_blk;
987
988                         /*
989                          * Note that the unmount was clean. If the unmount
990                          * was not clean, we need to know this to rebuild the
991                          * superblock counters from the perag headers if we
992                          * have a filesystem using non-persistent counters.
993                          */
994                         log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
995                 }
996         }
997
998         /*
999          * Make sure that there are no blocks in front of the head
1000          * with the same cycle number as the head.  This can happen
1001          * because we allow multiple outstanding log writes concurrently,
1002          * and the later writes might make it out before earlier ones.
1003          *
1004          * We use the lsn from before modifying it so that we'll never
1005          * overwrite the unmount record after a clean unmount.
1006          *
1007          * Do this only if we are going to recover the filesystem
1008          *
1009          * NOTE: This used to say "if (!readonly)"
1010          * However on Linux, we can & do recover a read-only filesystem.
1011          * We only skip recovery if NORECOVERY is specified on mount,
1012          * in which case we would not be here.
1013          *
1014          * But... if the -device- itself is readonly, just skip this.
1015          * We can't recover this device anyway, so it won't matter.
1016          */
1017         if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1018                 error = xlog_clear_stale_blocks(log, tail_lsn);
1019
1020 done:
1021         xlog_put_bp(bp);
1022
1023         if (error)
1024                 xlog_warn("XFS: failed to locate log tail");
1025         return error;
1026 }
1027
1028 /*
1029  * Is the log zeroed at all?
1030  *
1031  * The last binary search should be changed to perform an X block read
1032  * once X becomes small enough.  You can then search linearly through
1033  * the X blocks.  This will cut down on the number of reads we need to do.
1034  *
1035  * If the log is partially zeroed, this routine will pass back the blkno
1036  * of the first block with cycle number 0.  It won't have a complete LR
1037  * preceding it.
1038  *
1039  * Return:
1040  *      0  => the log is completely written to
1041  *      -1 => use *blk_no as the first block of the log
1042  *      >0 => error has occurred
1043  */
1044 STATIC int
1045 xlog_find_zeroed(
1046         xlog_t          *log,
1047         xfs_daddr_t     *blk_no)
1048 {
1049         xfs_buf_t       *bp;
1050         xfs_caddr_t     offset;
1051         uint            first_cycle, last_cycle;
1052         xfs_daddr_t     new_blk, last_blk, start_blk;
1053         xfs_daddr_t     num_scan_bblks;
1054         int             error, log_bbnum = log->l_logBBsize;
1055
1056         *blk_no = 0;
1057
1058         /* check totally zeroed log */
1059         bp = xlog_get_bp(log, 1);
1060         if (!bp)
1061                 return ENOMEM;
1062         error = xlog_bread(log, 0, 1, bp, &offset);
1063         if (error)
1064                 goto bp_err;
1065
1066         first_cycle = xlog_get_cycle(offset);
1067         if (first_cycle == 0) {         /* completely zeroed log */
1068                 *blk_no = 0;
1069                 xlog_put_bp(bp);
1070                 return -1;
1071         }
1072
1073         /* check partially zeroed log */
1074         error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1075         if (error)
1076                 goto bp_err;
1077
1078         last_cycle = xlog_get_cycle(offset);
1079         if (last_cycle != 0) {          /* log completely written to */
1080                 xlog_put_bp(bp);
1081                 return 0;
1082         } else if (first_cycle != 1) {
1083                 /*
1084                  * If the cycle of the last block is zero, the cycle of
1085                  * the first block must be 1. If it's not, maybe we're
1086                  * not looking at a log... Bail out.
1087                  */
1088                 xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
1089                 return XFS_ERROR(EINVAL);
1090         }
1091
1092         /* we have a partially zeroed log */
1093         last_blk = log_bbnum-1;
1094         if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1095                 goto bp_err;
1096
1097         /*
1098          * Validate the answer.  Because there is no way to guarantee that
1099          * the entire log is made up of log records which are the same size,
1100          * we scan over the defined maximum blocks.  At this point, the maximum
1101          * is not chosen to mean anything special.   XXXmiken
1102          */
1103         num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1104         ASSERT(num_scan_bblks <= INT_MAX);
1105
1106         if (last_blk < num_scan_bblks)
1107                 num_scan_bblks = last_blk;
1108         start_blk = last_blk - num_scan_bblks;
1109
1110         /*
1111          * We search for any instances of cycle number 0 that occur before
1112          * our current estimate of the head.  What we're trying to detect is
1113          *        1 ... | 0 | 1 | 0...
1114          *                       ^ binary search ends here
1115          */
1116         if ((error = xlog_find_verify_cycle(log, start_blk,
1117                                          (int)num_scan_bblks, 0, &new_blk)))
1118                 goto bp_err;
1119         if (new_blk != -1)
1120                 last_blk = new_blk;
1121
1122         /*
1123          * Potentially backup over partial log record write.  We don't need
1124          * to search the end of the log because we know it is zero.
1125          */
1126         if ((error = xlog_find_verify_log_record(log, start_blk,
1127                                 &last_blk, 0)) == -1) {
1128             error = XFS_ERROR(EIO);
1129             goto bp_err;
1130         } else if (error)
1131             goto bp_err;
1132
1133         *blk_no = last_blk;
1134 bp_err:
1135         xlog_put_bp(bp);
1136         if (error)
1137                 return error;
1138         return -1;
1139 }
1140
1141 /*
1142  * These are simple subroutines used by xlog_clear_stale_blocks() below
1143  * to initialize a buffer full of empty log record headers and write
1144  * them into the log.
1145  */
1146 STATIC void
1147 xlog_add_record(
1148         xlog_t                  *log,
1149         xfs_caddr_t             buf,
1150         int                     cycle,
1151         int                     block,
1152         int                     tail_cycle,
1153         int                     tail_block)
1154 {
1155         xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;
1156
1157         memset(buf, 0, BBSIZE);
1158         recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1159         recp->h_cycle = cpu_to_be32(cycle);
1160         recp->h_version = cpu_to_be32(
1161                         xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1162         recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1163         recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1164         recp->h_fmt = cpu_to_be32(XLOG_FMT);
1165         memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1166 }
1167
1168 STATIC int
1169 xlog_write_log_records(
1170         xlog_t          *log,
1171         int             cycle,
1172         int             start_block,
1173         int             blocks,
1174         int             tail_cycle,
1175         int             tail_block)
1176 {
1177         xfs_caddr_t     offset;
1178         xfs_buf_t       *bp;
1179         int             balign, ealign;
1180         int             sectbb = log->l_sectBBsize;
1181         int             end_block = start_block + blocks;
1182         int             bufblks;
1183         int             error = 0;
1184         int             i, j = 0;
1185
1186         /*
1187          * Greedily allocate a buffer big enough to handle the full
1188          * range of basic blocks to be written.  If that fails, try
1189          * a smaller size.  We need to be able to write at least a
1190          * log sector, or we're out of luck.
1191          */
1192         bufblks = 1 << ffs(blocks);
1193         while (!(bp = xlog_get_bp(log, bufblks))) {
1194                 bufblks >>= 1;
1195                 if (bufblks < sectbb)
1196                         return ENOMEM;
1197         }
1198
1199         /* We may need to do a read at the start to fill in part of
1200          * the buffer in the starting sector not covered by the first
1201          * write below.
1202          */
1203         balign = round_down(start_block, sectbb);
1204         if (balign != start_block) {
1205                 error = xlog_bread_noalign(log, start_block, 1, bp);
1206                 if (error)
1207                         goto out_put_bp;
1208
1209                 j = start_block - balign;
1210         }
1211
1212         for (i = start_block; i < end_block; i += bufblks) {
1213                 int             bcount, endcount;
1214
1215                 bcount = min(bufblks, end_block - start_block);
1216                 endcount = bcount - j;
1217
1218                 /* We may need to do a read at the end to fill in part of
1219                  * the buffer in the final sector not covered by the write.
1220                  * If this is the same sector as the above read, skip it.
1221                  */
1222                 ealign = round_down(end_block, sectbb);
1223                 if (j == 0 && (start_block + endcount > ealign)) {
1224                         offset = XFS_BUF_PTR(bp);
1225                         balign = BBTOB(ealign - start_block);
1226                         error = XFS_BUF_SET_PTR(bp, offset + balign,
1227                                                 BBTOB(sectbb));
1228                         if (error)
1229                                 break;
1230
1231                         error = xlog_bread_noalign(log, ealign, sectbb, bp);
1232                         if (error)
1233                                 break;
1234
1235                         error = XFS_BUF_SET_PTR(bp, offset, bufblks);
1236                         if (error)
1237                                 break;
1238                 }
1239
1240                 offset = xlog_align(log, start_block, endcount, bp);
1241                 for (; j < endcount; j++) {
1242                         xlog_add_record(log, offset, cycle, i+j,
1243                                         tail_cycle, tail_block);
1244                         offset += BBSIZE;
1245                 }
1246                 error = xlog_bwrite(log, start_block, endcount, bp);
1247                 if (error)
1248                         break;
1249                 start_block += endcount;
1250                 j = 0;
1251         }
1252
1253  out_put_bp:
1254         xlog_put_bp(bp);
1255         return error;
1256 }
1257
1258 /*
1259  * This routine is called to blow away any incomplete log writes out
1260  * in front of the log head.  We do this so that we won't become confused
1261  * if we come up, write only a little bit more, and then crash again.
1262  * If we leave the partial log records out there, this situation could
1263  * cause us to think those partial writes are valid blocks since they
1264  * have the current cycle number.  We get rid of them by overwriting them
1265  * with empty log records with the old cycle number rather than the
1266  * current one.
1267  *
1268  * The tail lsn is passed in rather than taken from
1269  * the log so that we will not write over the unmount record after a
1270  * clean unmount in a 512 block log.  Doing so would leave the log without
1271  * any valid log records in it until a new one was written.  If we crashed
1272  * during that time we would not be able to recover.
1273  */
1274 STATIC int
1275 xlog_clear_stale_blocks(
1276         xlog_t          *log,
1277         xfs_lsn_t       tail_lsn)
1278 {
1279         int             tail_cycle, head_cycle;
1280         int             tail_block, head_block;
1281         int             tail_distance, max_distance;
1282         int             distance;
1283         int             error;
1284
1285         tail_cycle = CYCLE_LSN(tail_lsn);
1286         tail_block = BLOCK_LSN(tail_lsn);
1287         head_cycle = log->l_curr_cycle;
1288         head_block = log->l_curr_block;
1289
1290         /*
1291          * Figure out the distance between the new head of the log
1292          * and the tail.  We want to write over any blocks beyond the
1293          * head that we may have written just before the crash, but
1294          * we don't want to overwrite the tail of the log.
1295          */
1296         if (head_cycle == tail_cycle) {
1297                 /*
1298                  * The tail is behind the head in the physical log,
1299                  * so the distance from the head to the tail is the
1300                  * distance from the head to the end of the log plus
1301                  * the distance from the beginning of the log to the
1302                  * tail.
1303                  */
1304                 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1305                         XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1306                                          XFS_ERRLEVEL_LOW, log->l_mp);
1307                         return XFS_ERROR(EFSCORRUPTED);
1308                 }
1309                 tail_distance = tail_block + (log->l_logBBsize - head_block);
1310         } else {
1311                 /*
1312                  * The head is behind the tail in the physical log,
1313                  * so the distance from the head to the tail is just
1314                  * the tail block minus the head block.
1315                  */
1316                 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1317                         XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1318                                          XFS_ERRLEVEL_LOW, log->l_mp);
1319                         return XFS_ERROR(EFSCORRUPTED);
1320                 }
1321                 tail_distance = tail_block - head_block;
1322         }
1323
1324         /*
1325          * If the head is right up against the tail, we can't clear
1326          * anything.
1327          */
1328         if (tail_distance <= 0) {
1329                 ASSERT(tail_distance == 0);
1330                 return 0;
1331         }
1332
1333         max_distance = XLOG_TOTAL_REC_SHIFT(log);
1334         /*
1335          * Take the smaller of the maximum amount of outstanding I/O
1336          * we could have and the distance to the tail to clear out.
1337          * We take the smaller so that we don't overwrite the tail and
1338          * we don't waste all day writing from the head to the tail
1339          * for no reason.
1340          */
1341         max_distance = MIN(max_distance, tail_distance);
1342
1343         if ((head_block + max_distance) <= log->l_logBBsize) {
1344                 /*
1345                  * We can stomp all the blocks we need to without
1346                  * wrapping around the end of the log.  Just do it
1347                  * in a single write.  Use the cycle number of the
1348                  * current cycle minus one so that the log will look like:
1349                  *     n ... | n - 1 ...
1350                  */
1351                 error = xlog_write_log_records(log, (head_cycle - 1),
1352                                 head_block, max_distance, tail_cycle,
1353                                 tail_block);
1354                 if (error)
1355                         return error;
1356         } else {
1357                 /*
1358                  * We need to wrap around the end of the physical log in
1359                  * order to clear all the blocks.  Do it in two separate
1360                  * I/Os.  The first write should be from the head to the
1361                  * end of the physical log, and it should use the current
1362                  * cycle number minus one just like above.
1363                  */
1364                 distance = log->l_logBBsize - head_block;
1365                 error = xlog_write_log_records(log, (head_cycle - 1),
1366                                 head_block, distance, tail_cycle,
1367                                 tail_block);
1368
1369                 if (error)
1370                         return error;
1371
1372                 /*
1373                  * Now write the blocks at the start of the physical log.
1374                  * This writes the remainder of the blocks we want to clear.
1375                  * It uses the current cycle number since we're now on the
1376                  * same cycle as the head so that we get:
1377                  *    n ... n ... | n - 1 ...
1378                  *    ^^^^^ blocks we're writing
1379                  */
1380                 distance = max_distance - (log->l_logBBsize - head_block);
1381                 error = xlog_write_log_records(log, head_cycle, 0, distance,
1382                                 tail_cycle, tail_block);
1383                 if (error)
1384                         return error;
1385         }
1386
1387         return 0;
1388 }
1389
1390 /******************************************************************************
1391  *
1392  *              Log recover routines
1393  *
1394  ******************************************************************************
1395  */
1396
1397 STATIC xlog_recover_t *
1398 xlog_recover_find_tid(
1399         struct hlist_head       *head,
1400         xlog_tid_t              tid)
1401 {
1402         xlog_recover_t          *trans;
1403         struct hlist_node       *n;
1404
1405         hlist_for_each_entry(trans, n, head, r_list) {
1406                 if (trans->r_log_tid == tid)
1407                         return trans;
1408         }
1409         return NULL;
1410 }
1411
1412 STATIC void
1413 xlog_recover_new_tid(
1414         struct hlist_head       *head,
1415         xlog_tid_t              tid,
1416         xfs_lsn_t               lsn)
1417 {
1418         xlog_recover_t          *trans;
1419
1420         trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1421         trans->r_log_tid   = tid;
1422         trans->r_lsn       = lsn;
1423         INIT_LIST_HEAD(&trans->r_itemq);
1424
1425         INIT_HLIST_NODE(&trans->r_list);
1426         hlist_add_head(&trans->r_list, head);
1427 }
1428
1429 STATIC void
1430 xlog_recover_add_item(
1431         struct list_head        *head)
1432 {
1433         xlog_recover_item_t     *item;
1434
1435         item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1436         INIT_LIST_HEAD(&item->ri_list);
1437         list_add_tail(&item->ri_list, head);
1438 }
1439
1440 STATIC int
1441 xlog_recover_add_to_cont_trans(
1442         struct log              *log,
1443         xlog_recover_t          *trans,
1444         xfs_caddr_t             dp,
1445         int                     len)
1446 {
1447         xlog_recover_item_t     *item;
1448         xfs_caddr_t             ptr, old_ptr;
1449         int                     old_len;
1450
1451         if (list_empty(&trans->r_itemq)) {
1452                 /* finish copying rest of trans header */
1453                 xlog_recover_add_item(&trans->r_itemq);
1454                 ptr = (xfs_caddr_t) &trans->r_theader +
1455                                 sizeof(xfs_trans_header_t) - len;
1456                 memcpy(ptr, dp, len); /* d, s, l */
1457                 return 0;
1458         }
1459         /* take the tail entry */
1460         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1461
1462         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1463         old_len = item->ri_buf[item->ri_cnt-1].i_len;
1464
1465         ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1466         memcpy(&ptr[old_len], dp, len); /* d, s, l */
1467         item->ri_buf[item->ri_cnt-1].i_len += len;
1468         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1469         trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1470         return 0;
1471 }
1472
1473 /*
1474  * The next region to add is the start of a new region.  It could be
1475  * a whole region or it could be the first part of a new region.  Because
1476  * of this, the assumption here is that the type and size fields of all
1477  * format structures fit into the first 32 bits of the structure.
1478  *
1479  * This works because all regions must be 32 bit aligned.  Therefore, we
1480  * either have both fields or we have neither field.  In the case we have
1481  * neither field, the data part of the region is zero length.  We only have
1482  * a log_op_header and can throw away the header since a new one will appear
1483  * later.  If we have at least 4 bytes, then we can determine how many regions
1484  * will appear in the current log item.
1485  */
1486 STATIC int
1487 xlog_recover_add_to_trans(
1488         struct log              *log,
1489         xlog_recover_t          *trans,
1490         xfs_caddr_t             dp,
1491         int                     len)
1492 {
1493         xfs_inode_log_format_t  *in_f;                  /* any will do */
1494         xlog_recover_item_t     *item;
1495         xfs_caddr_t             ptr;
1496
1497         if (!len)
1498                 return 0;
1499         if (list_empty(&trans->r_itemq)) {
1500                 /* we need to catch log corruptions here */
1501                 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1502                         xlog_warn("XFS: xlog_recover_add_to_trans: "
1503                                   "bad header magic number");
1504                         ASSERT(0);
1505                         return XFS_ERROR(EIO);
1506                 }
1507                 if (len == sizeof(xfs_trans_header_t))
1508                         xlog_recover_add_item(&trans->r_itemq);
1509                 memcpy(&trans->r_theader, dp, len); /* d, s, l */
1510                 return 0;
1511         }
1512
1513         ptr = kmem_alloc(len, KM_SLEEP);
1514         memcpy(ptr, dp, len);
1515         in_f = (xfs_inode_log_format_t *)ptr;
1516
1517         /* take the tail entry */
1518         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1519         if (item->ri_total != 0 &&
1520              item->ri_total == item->ri_cnt) {
1521                 /* tail item is in use, get a new one */
1522                 xlog_recover_add_item(&trans->r_itemq);
1523                 item = list_entry(trans->r_itemq.prev,
1524                                         xlog_recover_item_t, ri_list);
1525         }
1526
1527         if (item->ri_total == 0) {              /* first region to be added */
1528                 if (in_f->ilf_size == 0 ||
1529                     in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1530                         xlog_warn(
1531         "XFS: bad number of regions (%d) in inode log format",
1532                                   in_f->ilf_size);
1533                         ASSERT(0);
1534                         return XFS_ERROR(EIO);
1535                 }
1536
1537                 item->ri_total = in_f->ilf_size;
1538                 item->ri_buf =
1539                         kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1540                                     KM_SLEEP);
1541         }
1542         ASSERT(item->ri_total > item->ri_cnt);
1543         /* Description region is ri_buf[0] */
1544         item->ri_buf[item->ri_cnt].i_addr = ptr;
1545         item->ri_buf[item->ri_cnt].i_len  = len;
1546         item->ri_cnt++;
1547         trace_xfs_log_recover_item_add(log, trans, item, 0);
1548         return 0;
1549 }
1550
1551 /*
1552  * Sort the log items in the transaction. Cancelled buffers need
1553  * to be put first so they are processed before any items that might
1554  * modify the buffers. If they are cancelled, then the modifications
1555  * don't need to be replayed.
1556  */
1557 STATIC int
1558 xlog_recover_reorder_trans(
1559         struct log              *log,
1560         xlog_recover_t          *trans,
1561         int                     pass)
1562 {
1563         xlog_recover_item_t     *item, *n;
1564         LIST_HEAD(sort_list);
1565
1566         list_splice_init(&trans->r_itemq, &sort_list);
1567         list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1568                 xfs_buf_log_format_t    *buf_f;
1569
1570                 buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
1571
1572                 switch (ITEM_TYPE(item)) {
1573                 case XFS_LI_BUF:
1574                         if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1575                                 trace_xfs_log_recover_item_reorder_head(log,
1576                                                         trans, item, pass);
1577                                 list_move(&item->ri_list, &trans->r_itemq);
1578                                 break;
1579                         }
1580                 case XFS_LI_INODE:
1581                 case XFS_LI_DQUOT:
1582                 case XFS_LI_QUOTAOFF:
1583                 case XFS_LI_EFD:
1584                 case XFS_LI_EFI:
1585                         trace_xfs_log_recover_item_reorder_tail(log,
1586                                                         trans, item, pass);
1587                         list_move_tail(&item->ri_list, &trans->r_itemq);
1588                         break;
1589                 default:
1590                         xlog_warn(
1591         "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
1592                         ASSERT(0);
1593                         return XFS_ERROR(EIO);
1594                 }
1595         }
1596         ASSERT(list_empty(&sort_list));
1597         return 0;
1598 }
1599
1600 /*
1601  * Build up the table of buf cancel records so that we don't replay
1602  * cancelled data in the second pass.  For buffer records that are
1603  * not cancel records, there is nothing to do here so we just return.
1604  *
1605  * If we get a cancel record which is already in the table, this indicates
1606  * that the buffer was cancelled multiple times.  In order to ensure
1607  * that during pass 2 we keep the record in the table until we reach its
1608  * last occurrence in the log, we keep a reference count in the cancel
1609  * record in the table to tell us how many times we expect to see this
1610  * record during the second pass.
1611  */
1612 STATIC void
1613 xlog_recover_do_buffer_pass1(
1614         xlog_t                  *log,
1615         xfs_buf_log_format_t    *buf_f)
1616 {
1617         xfs_buf_cancel_t        *bcp;
1618         xfs_buf_cancel_t        *nextp;
1619         xfs_buf_cancel_t        *prevp;
1620         xfs_buf_cancel_t        **bucket;
1621         xfs_daddr_t             blkno = 0;
1622         uint                    len = 0;
1623         ushort                  flags = 0;
1624
1625         switch (buf_f->blf_type) {
1626         case XFS_LI_BUF:
1627                 blkno = buf_f->blf_blkno;
1628                 len = buf_f->blf_len;
1629                 flags = buf_f->blf_flags;
1630                 break;
1631         }
1632
1633         /*
1634          * If this isn't a cancel buffer item, then just return.
1635          */
1636         if (!(flags & XFS_BLF_CANCEL)) {
1637                 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1638                 return;
1639         }
1640
1641         /*
1642          * Insert an xfs_buf_cancel record into the hash table of
1643          * them.  If there is already an identical record, bump
1644          * its reference count.
1645          */
1646         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1647                                           XLOG_BC_TABLE_SIZE];
1648         /*
1649          * If the hash bucket is empty then just insert a new record into
1650          * the bucket.
1651          */
1652         if (*bucket == NULL) {
1653                 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1654                                                      KM_SLEEP);
1655                 bcp->bc_blkno = blkno;
1656                 bcp->bc_len = len;
1657                 bcp->bc_refcount = 1;
1658                 bcp->bc_next = NULL;
1659                 *bucket = bcp;
1660                 return;
1661         }
1662
1663         /*
1664          * The hash bucket is not empty, so search for duplicates of our
1665          * record.  If we find one them just bump its refcount.  If not
1666          * then add us at the end of the list.
1667          */
1668         prevp = NULL;
1669         nextp = *bucket;
1670         while (nextp != NULL) {
1671                 if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
1672                         nextp->bc_refcount++;
1673                         trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1674                         return;
1675                 }
1676                 prevp = nextp;
1677                 nextp = nextp->bc_next;
1678         }
1679         ASSERT(prevp != NULL);
1680         bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1681                                              KM_SLEEP);
1682         bcp->bc_blkno = blkno;
1683         bcp->bc_len = len;
1684         bcp->bc_refcount = 1;
1685         bcp->bc_next = NULL;
1686         prevp->bc_next = bcp;
1687         trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1688 }
1689
1690 /*
1691  * Check to see whether the buffer being recovered has a corresponding
1692  * entry in the buffer cancel record table.  If it does then return 1
1693  * so that it will be cancelled, otherwise return 0.  If the buffer is
1694  * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
1695  * the refcount on the entry in the table and remove it from the table
1696  * if this is the last reference.
1697  *
1698  * We remove the cancel record from the table when we encounter its
1699  * last occurrence in the log so that if the same buffer is re-used
1700  * again after its last cancellation we actually replay the changes
1701  * made at that point.
1702  */
1703 STATIC int
1704 xlog_check_buffer_cancelled(
1705         xlog_t                  *log,
1706         xfs_daddr_t             blkno,
1707         uint                    len,
1708         ushort                  flags)
1709 {
1710         xfs_buf_cancel_t        *bcp;
1711         xfs_buf_cancel_t        *prevp;
1712         xfs_buf_cancel_t        **bucket;
1713
1714         if (log->l_buf_cancel_table == NULL) {
1715                 /*
1716                  * There is nothing in the table built in pass one,
1717                  * so this buffer must not be cancelled.
1718                  */
1719                 ASSERT(!(flags & XFS_BLF_CANCEL));
1720                 return 0;
1721         }
1722
1723         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1724                                           XLOG_BC_TABLE_SIZE];
1725         bcp = *bucket;
1726         if (bcp == NULL) {
1727                 /*
1728                  * There is no corresponding entry in the table built
1729                  * in pass one, so this buffer has not been cancelled.
1730                  */
1731                 ASSERT(!(flags & XFS_BLF_CANCEL));
1732                 return 0;
1733         }
1734
1735         /*
1736          * Search for an entry in the buffer cancel table that
1737          * matches our buffer.
1738          */
1739         prevp = NULL;
1740         while (bcp != NULL) {
1741                 if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
1742                         /*
1743                          * We've go a match, so return 1 so that the
1744                          * recovery of this buffer is cancelled.
1745                          * If this buffer is actually a buffer cancel
1746                          * log item, then decrement the refcount on the
1747                          * one in the table and remove it if this is the
1748                          * last reference.
1749                          */
1750                         if (flags & XFS_BLF_CANCEL) {
1751                                 bcp->bc_refcount--;
1752                                 if (bcp->bc_refcount == 0) {
1753                                         if (prevp == NULL) {
1754                                                 *bucket = bcp->bc_next;
1755                                         } else {
1756                                                 prevp->bc_next = bcp->bc_next;
1757                                         }
1758                                         kmem_free(bcp);
1759                                 }
1760                         }
1761                         return 1;
1762                 }
1763                 prevp = bcp;
1764                 bcp = bcp->bc_next;
1765         }
1766         /*
1767          * We didn't find a corresponding entry in the table, so
1768          * return 0 so that the buffer is NOT cancelled.
1769          */
1770         ASSERT(!(flags & XFS_BLF_CANCEL));
1771         return 0;
1772 }
1773
1774 STATIC int
1775 xlog_recover_do_buffer_pass2(
1776         xlog_t                  *log,
1777         xfs_buf_log_format_t    *buf_f)
1778 {
1779         xfs_daddr_t             blkno = 0;
1780         ushort                  flags = 0;
1781         uint                    len = 0;
1782
1783         switch (buf_f->blf_type) {
1784         case XFS_LI_BUF:
1785                 blkno = buf_f->blf_blkno;
1786                 flags = buf_f->blf_flags;
1787                 len = buf_f->blf_len;
1788                 break;
1789         }
1790
1791         return xlog_check_buffer_cancelled(log, blkno, len, flags);
1792 }
1793
1794 /*
1795  * Perform recovery for a buffer full of inodes.  In these buffers,
1796  * the only data which should be recovered is that which corresponds
1797  * to the di_next_unlinked pointers in the on disk inode structures.
1798  * The rest of the data for the inodes is always logged through the
1799  * inodes themselves rather than the inode buffer and is recovered
1800  * in xlog_recover_do_inode_trans().
1801  *
1802  * The only time when buffers full of inodes are fully recovered is
1803  * when the buffer is full of newly allocated inodes.  In this case
1804  * the buffer will not be marked as an inode buffer and so will be
1805  * sent to xlog_recover_do_reg_buffer() below during recovery.
1806  */
1807 STATIC int
1808 xlog_recover_do_inode_buffer(
1809         xfs_mount_t             *mp,
1810         xlog_recover_item_t     *item,
1811         xfs_buf_t               *bp,
1812         xfs_buf_log_format_t    *buf_f)
1813 {
1814         int                     i;
1815         int                     item_index;
1816         int                     bit;
1817         int                     nbits;
1818         int                     reg_buf_offset;
1819         int                     reg_buf_bytes;
1820         int                     next_unlinked_offset;
1821         int                     inodes_per_buf;
1822         xfs_agino_t             *logged_nextp;
1823         xfs_agino_t             *buffer_nextp;
1824         unsigned int            *data_map = NULL;
1825         unsigned int            map_size = 0;
1826
1827         trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1828
1829         switch (buf_f->blf_type) {
1830         case XFS_LI_BUF:
1831                 data_map = buf_f->blf_data_map;
1832                 map_size = buf_f->blf_map_size;
1833                 break;
1834         }
1835         /*
1836          * Set the variables corresponding to the current region to
1837          * 0 so that we'll initialize them on the first pass through
1838          * the loop.
1839          */
1840         reg_buf_offset = 0;
1841         reg_buf_bytes = 0;
1842         bit = 0;
1843         nbits = 0;
1844         item_index = 0;
1845         inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1846         for (i = 0; i < inodes_per_buf; i++) {
1847                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1848                         offsetof(xfs_dinode_t, di_next_unlinked);
1849
1850                 while (next_unlinked_offset >=
1851                        (reg_buf_offset + reg_buf_bytes)) {
1852                         /*
1853                          * The next di_next_unlinked field is beyond
1854                          * the current logged region.  Find the next
1855                          * logged region that contains or is beyond
1856                          * the current di_next_unlinked field.
1857                          */
1858                         bit += nbits;
1859                         bit = xfs_next_bit(data_map, map_size, bit);
1860
1861                         /*
1862                          * If there are no more logged regions in the
1863                          * buffer, then we're done.
1864                          */
1865                         if (bit == -1) {
1866                                 return 0;
1867                         }
1868
1869                         nbits = xfs_contig_bits(data_map, map_size,
1870                                                          bit);
1871                         ASSERT(nbits > 0);
1872                         reg_buf_offset = bit << XFS_BLF_SHIFT;
1873                         reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1874                         item_index++;
1875                 }
1876
1877                 /*
1878                  * If the current logged region starts after the current
1879                  * di_next_unlinked field, then move on to the next
1880                  * di_next_unlinked field.
1881                  */
1882                 if (next_unlinked_offset < reg_buf_offset) {
1883                         continue;
1884                 }
1885
1886                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1887                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1888                 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1889
1890                 /*
1891                  * The current logged region contains a copy of the
1892                  * current di_next_unlinked field.  Extract its value
1893                  * and copy it to the buffer copy.
1894                  */
1895                 logged_nextp = (xfs_agino_t *)
1896                                ((char *)(item->ri_buf[item_index].i_addr) +
1897                                 (next_unlinked_offset - reg_buf_offset));
1898                 if (unlikely(*logged_nextp == 0)) {
1899                         xfs_fs_cmn_err(CE_ALERT, mp,
1900                                 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
1901                                 item, bp);
1902                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1903                                          XFS_ERRLEVEL_LOW, mp);
1904                         return XFS_ERROR(EFSCORRUPTED);
1905                 }
1906
1907                 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1908                                               next_unlinked_offset);
1909                 *buffer_nextp = *logged_nextp;
1910         }
1911
1912         return 0;
1913 }
1914
1915 /*
1916  * Perform a 'normal' buffer recovery.  Each logged region of the
1917  * buffer should be copied over the corresponding region in the
1918  * given buffer.  The bitmap in the buf log format structure indicates
1919  * where to place the logged data.
1920  */
1921 /*ARGSUSED*/
1922 STATIC void
1923 xlog_recover_do_reg_buffer(
1924         struct xfs_mount        *mp,
1925         xlog_recover_item_t     *item,
1926         xfs_buf_t               *bp,
1927         xfs_buf_log_format_t    *buf_f)
1928 {
1929         int                     i;
1930         int                     bit;
1931         int                     nbits;
1932         unsigned int            *data_map = NULL;
1933         unsigned int            map_size = 0;
1934         int                     error;
1935
1936         trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1937
1938         switch (buf_f->blf_type) {
1939         case XFS_LI_BUF:
1940                 data_map = buf_f->blf_data_map;
1941                 map_size = buf_f->blf_map_size;
1942                 break;
1943         }
1944         bit = 0;
1945         i = 1;  /* 0 is the buf format structure */
1946         while (1) {
1947                 bit = xfs_next_bit(data_map, map_size, bit);
1948                 if (bit == -1)
1949                         break;
1950                 nbits = xfs_contig_bits(data_map, map_size, bit);
1951                 ASSERT(nbits > 0);
1952                 ASSERT(item->ri_buf[i].i_addr != NULL);
1953                 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
1954                 ASSERT(XFS_BUF_COUNT(bp) >=
1955                        ((uint)bit << XFS_BLF_SHIFT)+(nbits<<XFS_BLF_SHIFT));
1956
1957                 /*
1958                  * Do a sanity check if this is a dquot buffer. Just checking
1959                  * the first dquot in the buffer should do. XXXThis is
1960                  * probably a good thing to do for other buf types also.
1961                  */
1962                 error = 0;
1963                 if (buf_f->blf_flags &
1964                    (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
1965                         if (item->ri_buf[i].i_addr == NULL) {
1966                                 cmn_err(CE_ALERT,
1967                                         "XFS: NULL dquot in %s.", __func__);
1968                                 goto next;
1969                         }
1970                         if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1971                                 cmn_err(CE_ALERT,
1972                                         "XFS: dquot too small (%d) in %s.",
1973                                         item->ri_buf[i].i_len, __func__);
1974                                 goto next;
1975                         }
1976                         error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
1977                                                item->ri_buf[i].i_addr,
1978                                                -1, 0, XFS_QMOPT_DOWARN,
1979                                                "dquot_buf_recover");
1980                         if (error)
1981                                 goto next;
1982                 }
1983
1984                 memcpy(xfs_buf_offset(bp,
1985                         (uint)bit << XFS_BLF_SHIFT),    /* dest */
1986                         item->ri_buf[i].i_addr,         /* source */
1987                         nbits<<XFS_BLF_SHIFT);          /* length */
1988  next:
1989                 i++;
1990                 bit += nbits;
1991         }
1992
1993         /* Shouldn't be any more regions */
1994         ASSERT(i == item->ri_total);
1995 }
1996
1997 /*
1998  * Do some primitive error checking on ondisk dquot data structures.
1999  */
2000 int
2001 xfs_qm_dqcheck(
2002         xfs_disk_dquot_t *ddq,
2003         xfs_dqid_t       id,
2004         uint             type,    /* used only when IO_dorepair is true */
2005         uint             flags,
2006         char             *str)
2007 {
2008         xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
2009         int             errs = 0;
2010
2011         /*
2012          * We can encounter an uninitialized dquot buffer for 2 reasons:
2013          * 1. If we crash while deleting the quotainode(s), and those blks got
2014          *    used for user data. This is because we take the path of regular
2015          *    file deletion; however, the size field of quotainodes is never
2016          *    updated, so all the tricks that we play in itruncate_finish
2017          *    don't quite matter.
2018          *
2019          * 2. We don't play the quota buffers when there's a quotaoff logitem.
2020          *    But the allocation will be replayed so we'll end up with an
2021          *    uninitialized quota block.
2022          *
2023          * This is all fine; things are still consistent, and we haven't lost
2024          * any quota information. Just don't complain about bad dquot blks.
2025          */
2026         if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
2027                 if (flags & XFS_QMOPT_DOWARN)
2028                         cmn_err(CE_ALERT,
2029                         "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2030                         str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2031                 errs++;
2032         }
2033         if (ddq->d_version != XFS_DQUOT_VERSION) {
2034                 if (flags & XFS_QMOPT_DOWARN)
2035                         cmn_err(CE_ALERT,
2036                         "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2037                         str, id, ddq->d_version, XFS_DQUOT_VERSION);
2038                 errs++;
2039         }
2040
2041         if (ddq->d_flags != XFS_DQ_USER &&
2042             ddq->d_flags != XFS_DQ_PROJ &&
2043             ddq->d_flags != XFS_DQ_GROUP) {
2044                 if (flags & XFS_QMOPT_DOWARN)
2045                         cmn_err(CE_ALERT,
2046                         "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2047                         str, id, ddq->d_flags);
2048                 errs++;
2049         }
2050
2051         if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2052                 if (flags & XFS_QMOPT_DOWARN)
2053                         cmn_err(CE_ALERT,
2054                         "%s : ondisk-dquot 0x%p, ID mismatch: "
2055                         "0x%x expected, found id 0x%x",
2056                         str, ddq, id, be32_to_cpu(ddq->d_id));
2057                 errs++;
2058         }
2059
2060         if (!errs && ddq->d_id) {
2061                 if (ddq->d_blk_softlimit &&
2062                     be64_to_cpu(ddq->d_bcount) >=
2063                                 be64_to_cpu(ddq->d_blk_softlimit)) {
2064                         if (!ddq->d_btimer) {
2065                                 if (flags & XFS_QMOPT_DOWARN)
2066                                         cmn_err(CE_ALERT,
2067                                         "%s : Dquot ID 0x%x (0x%p) "
2068                                         "BLK TIMER NOT STARTED",
2069                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2070                                 errs++;
2071                         }
2072                 }
2073                 if (ddq->d_ino_softlimit &&
2074                     be64_to_cpu(ddq->d_icount) >=
2075                                 be64_to_cpu(ddq->d_ino_softlimit)) {
2076                         if (!ddq->d_itimer) {
2077                                 if (flags & XFS_QMOPT_DOWARN)
2078                                         cmn_err(CE_ALERT,
2079                                         "%s : Dquot ID 0x%x (0x%p) "
2080                                         "INODE TIMER NOT STARTED",
2081                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2082                                 errs++;
2083                         }
2084                 }
2085                 if (ddq->d_rtb_softlimit &&
2086                     be64_to_cpu(ddq->d_rtbcount) >=
2087                                 be64_to_cpu(ddq->d_rtb_softlimit)) {
2088                         if (!ddq->d_rtbtimer) {
2089                                 if (flags & XFS_QMOPT_DOWARN)
2090                                         cmn_err(CE_ALERT,
2091                                         "%s : Dquot ID 0x%x (0x%p) "
2092                                         "RTBLK TIMER NOT STARTED",
2093                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2094                                 errs++;
2095                         }
2096                 }
2097         }
2098
2099         if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2100                 return errs;
2101
2102         if (flags & XFS_QMOPT_DOWARN)
2103                 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2104
2105         /*
2106          * Typically, a repair is only requested by quotacheck.
2107          */
2108         ASSERT(id != -1);
2109         ASSERT(flags & XFS_QMOPT_DQREPAIR);
2110         memset(d, 0, sizeof(xfs_dqblk_t));
2111
2112         d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2113         d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2114         d->dd_diskdq.d_flags = type;
2115         d->dd_diskdq.d_id = cpu_to_be32(id);
2116
2117         return errs;
2118 }
2119
2120 /*
2121  * Perform a dquot buffer recovery.
2122  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2123  * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2124  * Else, treat it as a regular buffer and do recovery.
2125  */
2126 STATIC void
2127 xlog_recover_do_dquot_buffer(
2128         xfs_mount_t             *mp,
2129         xlog_t                  *log,
2130         xlog_recover_item_t     *item,
2131         xfs_buf_t               *bp,
2132         xfs_buf_log_format_t    *buf_f)
2133 {
2134         uint                    type;
2135
2136         trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2137
2138         /*
2139          * Filesystems are required to send in quota flags at mount time.
2140          */
2141         if (mp->m_qflags == 0) {
2142                 return;
2143         }
2144
2145         type = 0;
2146         if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2147                 type |= XFS_DQ_USER;
2148         if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2149                 type |= XFS_DQ_PROJ;
2150         if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2151                 type |= XFS_DQ_GROUP;
2152         /*
2153          * This type of quotas was turned off, so ignore this buffer
2154          */
2155         if (log->l_quotaoffs_flag & type)
2156                 return;
2157
2158         xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2159 }
2160
2161 /*
2162  * This routine replays a modification made to a buffer at runtime.
2163  * There are actually two types of buffer, regular and inode, which
2164  * are handled differently.  Inode buffers are handled differently
2165  * in that we only recover a specific set of data from them, namely
2166  * the inode di_next_unlinked fields.  This is because all other inode
2167  * data is actually logged via inode records and any data we replay
2168  * here which overlaps that may be stale.
2169  *
2170  * When meta-data buffers are freed at run time we log a buffer item
2171  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2172  * of the buffer in the log should not be replayed at recovery time.
2173  * This is so that if the blocks covered by the buffer are reused for
2174  * file data before we crash we don't end up replaying old, freed
2175  * meta-data into a user's file.
2176  *
2177  * To handle the cancellation of buffer log items, we make two passes
2178  * over the log during recovery.  During the first we build a table of
2179  * those buffers which have been cancelled, and during the second we
2180  * only replay those buffers which do not have corresponding cancel
2181  * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2182  * for more details on the implementation of the table of cancel records.
2183  */
2184 STATIC int
2185 xlog_recover_do_buffer_trans(
2186         xlog_t                  *log,
2187         xlog_recover_item_t     *item,
2188         int                     pass)
2189 {
2190         xfs_buf_log_format_t    *buf_f;
2191         xfs_mount_t             *mp;
2192         xfs_buf_t               *bp;
2193         int                     error;
2194         int                     cancel;
2195         xfs_daddr_t             blkno;
2196         int                     len;
2197         ushort                  flags;
2198         uint                    buf_flags;
2199
2200         buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
2201
2202         if (pass == XLOG_RECOVER_PASS1) {
2203                 /*
2204                  * In this pass we're only looking for buf items
2205                  * with the XFS_BLF_CANCEL bit set.
2206                  */
2207                 xlog_recover_do_buffer_pass1(log, buf_f);
2208                 return 0;
2209         } else {
2210                 /*
2211                  * In this pass we want to recover all the buffers
2212                  * which have not been cancelled and are not
2213                  * cancellation buffers themselves.  The routine
2214                  * we call here will tell us whether or not to
2215                  * continue with the replay of this buffer.
2216                  */
2217                 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2218                 if (cancel) {
2219                         trace_xfs_log_recover_buf_cancel(log, buf_f);
2220                         return 0;
2221                 }
2222         }
2223         trace_xfs_log_recover_buf_recover(log, buf_f);
2224         switch (buf_f->blf_type) {
2225         case XFS_LI_BUF:
2226                 blkno = buf_f->blf_blkno;
2227                 len = buf_f->blf_len;
2228                 flags = buf_f->blf_flags;
2229                 break;
2230         default:
2231                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2232                         "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2233                         buf_f->blf_type, log->l_mp->m_logname ?
2234                         log->l_mp->m_logname : "internal");
2235                 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2236                                  XFS_ERRLEVEL_LOW, log->l_mp);
2237                 return XFS_ERROR(EFSCORRUPTED);
2238         }
2239
2240         mp = log->l_mp;
2241         buf_flags = XBF_LOCK;
2242         if (!(flags & XFS_BLF_INODE_BUF))
2243                 buf_flags |= XBF_MAPPED;
2244
2245         bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
2246         if (XFS_BUF_ISERROR(bp)) {
2247                 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2248                                   bp, blkno);
2249                 error = XFS_BUF_GETERROR(bp);
2250                 xfs_buf_relse(bp);
2251                 return error;
2252         }
2253
2254         error = 0;
2255         if (flags & XFS_BLF_INODE_BUF) {
2256                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2257         } else if (flags &
2258                   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2259                 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2260         } else {
2261                 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2262         }
2263         if (error)
2264                 return XFS_ERROR(error);
2265
2266         /*
2267          * Perform delayed write on the buffer.  Asynchronous writes will be
2268          * slower when taking into account all the buffers to be flushed.
2269          *
2270          * Also make sure that only inode buffers with good sizes stay in
2271          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2272          * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2273          * buffers in the log can be a different size if the log was generated
2274          * by an older kernel using unclustered inode buffers or a newer kernel
2275          * running with a different inode cluster size.  Regardless, if the
2276          * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2277          * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2278          * the buffer out of the buffer cache so that the buffer won't
2279          * overlap with future reads of those inodes.
2280          */
2281         if (XFS_DINODE_MAGIC ==
2282             be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2283             (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2284                         (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2285                 XFS_BUF_STALE(bp);
2286                 error = xfs_bwrite(mp, bp);
2287         } else {
2288                 ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2289                 bp->b_mount = mp;
2290                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2291                 xfs_bdwrite(mp, bp);
2292         }
2293
2294         return (error);
2295 }
2296
2297 STATIC int
2298 xlog_recover_do_inode_trans(
2299         xlog_t                  *log,
2300         xlog_recover_item_t     *item,
2301         int                     pass)
2302 {
2303         xfs_inode_log_format_t  *in_f;
2304         xfs_mount_t             *mp;
2305         xfs_buf_t               *bp;
2306         xfs_dinode_t            *dip;
2307         xfs_ino_t               ino;
2308         int                     len;
2309         xfs_caddr_t             src;
2310         xfs_caddr_t             dest;
2311         int                     error;
2312         int                     attr_index;
2313         uint                    fields;
2314         xfs_icdinode_t          *dicp;
2315         int                     need_free = 0;
2316
2317         if (pass == XLOG_RECOVER_PASS1) {
2318                 return 0;
2319         }
2320
2321         if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2322                 in_f = (xfs_inode_log_format_t *)item->ri_buf[0].i_addr;
2323         } else {
2324                 in_f = (xfs_inode_log_format_t *)kmem_alloc(
2325                         sizeof(xfs_inode_log_format_t), KM_SLEEP);
2326                 need_free = 1;
2327                 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2328                 if (error)
2329                         goto error;
2330         }
2331         ino = in_f->ilf_ino;
2332         mp = log->l_mp;
2333
2334         /*
2335          * Inode buffers can be freed, look out for it,
2336          * and do not replay the inode.
2337          */
2338         if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2339                                         in_f->ilf_len, 0)) {
2340                 error = 0;
2341                 trace_xfs_log_recover_inode_cancel(log, in_f);
2342                 goto error;
2343         }
2344         trace_xfs_log_recover_inode_recover(log, in_f);
2345
2346         bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
2347                           XBF_LOCK);
2348         if (XFS_BUF_ISERROR(bp)) {
2349                 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2350                                   bp, in_f->ilf_blkno);
2351                 error = XFS_BUF_GETERROR(bp);
2352                 xfs_buf_relse(bp);
2353                 goto error;
2354         }
2355         error = 0;
2356         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2357         dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2358
2359         /*
2360          * Make sure the place we're flushing out to really looks
2361          * like an inode!
2362          */
2363         if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
2364                 xfs_buf_relse(bp);
2365                 xfs_fs_cmn_err(CE_ALERT, mp,
2366                         "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2367                         dip, bp, ino);
2368                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2369                                  XFS_ERRLEVEL_LOW, mp);
2370                 error = EFSCORRUPTED;
2371                 goto error;
2372         }
2373         dicp = (xfs_icdinode_t *)(item->ri_buf[1].i_addr);
2374         if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2375                 xfs_buf_relse(bp);
2376                 xfs_fs_cmn_err(CE_ALERT, mp,
2377                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2378                         item, ino);
2379                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2380                                  XFS_ERRLEVEL_LOW, mp);
2381                 error = EFSCORRUPTED;
2382                 goto error;
2383         }
2384
2385         /* Skip replay when the on disk inode is newer than the log one */
2386         if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2387                 /*
2388                  * Deal with the wrap case, DI_MAX_FLUSH is less
2389                  * than smaller numbers
2390                  */
2391                 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2392                     dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2393                         /* do nothing */
2394                 } else {
2395                         xfs_buf_relse(bp);
2396                         trace_xfs_log_recover_inode_skip(log, in_f);
2397                         error = 0;
2398                         goto error;
2399                 }
2400         }
2401         /* Take the opportunity to reset the flush iteration count */
2402         dicp->di_flushiter = 0;
2403
2404         if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2405                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2406                     (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2407                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2408                                          XFS_ERRLEVEL_LOW, mp, dicp);
2409                         xfs_buf_relse(bp);
2410                         xfs_fs_cmn_err(CE_ALERT, mp,
2411                                 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2412                                 item, dip, bp, ino);
2413                         error = EFSCORRUPTED;
2414                         goto error;
2415                 }
2416         } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2417                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2418                     (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2419                     (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2420                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2421                                              XFS_ERRLEVEL_LOW, mp, dicp);
2422                         xfs_buf_relse(bp);
2423                         xfs_fs_cmn_err(CE_ALERT, mp,
2424                                 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2425                                 item, dip, bp, ino);
2426                         error = EFSCORRUPTED;
2427                         goto error;
2428                 }
2429         }
2430         if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2431                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2432                                      XFS_ERRLEVEL_LOW, mp, dicp);
2433                 xfs_buf_relse(bp);
2434                 xfs_fs_cmn_err(CE_ALERT, mp,
2435                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2436                         item, dip, bp, ino,
2437                         dicp->di_nextents + dicp->di_anextents,
2438                         dicp->di_nblocks);
2439                 error = EFSCORRUPTED;
2440                 goto error;
2441         }
2442         if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2443                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2444                                      XFS_ERRLEVEL_LOW, mp, dicp);
2445                 xfs_buf_relse(bp);
2446                 xfs_fs_cmn_err(CE_ALERT, mp,
2447                         "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2448                         item, dip, bp, ino, dicp->di_forkoff);
2449                 error = EFSCORRUPTED;
2450                 goto error;
2451         }
2452         if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2453                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2454                                      XFS_ERRLEVEL_LOW, mp, dicp);
2455                 xfs_buf_relse(bp);
2456                 xfs_fs_cmn_err(CE_ALERT, mp,
2457                         "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2458                         item->ri_buf[1].i_len, item);
2459                 error = EFSCORRUPTED;
2460                 goto error;
2461         }
2462
2463         /* The core is in in-core format */
2464         xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr);
2465
2466         /* the rest is in on-disk format */
2467         if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2468                 memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2469                         item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2470                         item->ri_buf[1].i_len  - sizeof(struct xfs_icdinode));
2471         }
2472
2473         fields = in_f->ilf_fields;
2474         switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2475         case XFS_ILOG_DEV:
2476                 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2477                 break;
2478         case XFS_ILOG_UUID:
2479                 memcpy(XFS_DFORK_DPTR(dip),
2480                        &in_f->ilf_u.ilfu_uuid,
2481                        sizeof(uuid_t));
2482                 break;
2483         }
2484
2485         if (in_f->ilf_size == 2)
2486                 goto write_inode_buffer;
2487         len = item->ri_buf[2].i_len;
2488         src = item->ri_buf[2].i_addr;
2489         ASSERT(in_f->ilf_size <= 4);
2490         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2491         ASSERT(!(fields & XFS_ILOG_DFORK) ||
2492                (len == in_f->ilf_dsize));
2493
2494         switch (fields & XFS_ILOG_DFORK) {
2495         case XFS_ILOG_DDATA:
2496         case XFS_ILOG_DEXT:
2497                 memcpy(XFS_DFORK_DPTR(dip), src, len);
2498                 break;
2499
2500         case XFS_ILOG_DBROOT:
2501                 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2502                                  (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2503                                  XFS_DFORK_DSIZE(dip, mp));
2504                 break;
2505
2506         default:
2507                 /*
2508                  * There are no data fork flags set.
2509                  */
2510                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2511                 break;
2512         }
2513
2514         /*
2515          * If we logged any attribute data, recover it.  There may or
2516          * may not have been any other non-core data logged in this
2517          * transaction.
2518          */
2519         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2520                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2521                         attr_index = 3;
2522                 } else {
2523                         attr_index = 2;
2524                 }
2525                 len = item->ri_buf[attr_index].i_len;
2526                 src = item->ri_buf[attr_index].i_addr;
2527                 ASSERT(len == in_f->ilf_asize);
2528
2529                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2530                 case XFS_ILOG_ADATA:
2531                 case XFS_ILOG_AEXT:
2532                         dest = XFS_DFORK_APTR(dip);
2533                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2534                         memcpy(dest, src, len);
2535                         break;
2536
2537                 case XFS_ILOG_ABROOT:
2538                         dest = XFS_DFORK_APTR(dip);
2539                         xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2540                                          len, (xfs_bmdr_block_t*)dest,
2541                                          XFS_DFORK_ASIZE(dip, mp));
2542                         break;
2543
2544                 default:
2545                         xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2546                         ASSERT(0);
2547                         xfs_buf_relse(bp);
2548                         error = EIO;
2549                         goto error;
2550                 }
2551         }
2552
2553 write_inode_buffer:
2554         ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2555         bp->b_mount = mp;
2556         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2557         xfs_bdwrite(mp, bp);
2558 error:
2559         if (need_free)
2560                 kmem_free(in_f);
2561         return XFS_ERROR(error);
2562 }
2563
2564 /*
2565  * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2566  * structure, so that we know not to do any dquot item or dquot buffer recovery,
2567  * of that type.
2568  */
2569 STATIC int
2570 xlog_recover_do_quotaoff_trans(
2571         xlog_t                  *log,
2572         xlog_recover_item_t     *item,
2573         int                     pass)
2574 {
2575         xfs_qoff_logformat_t    *qoff_f;
2576
2577         if (pass == XLOG_RECOVER_PASS2) {
2578                 return (0);
2579         }
2580
2581         qoff_f = (xfs_qoff_logformat_t *)item->ri_buf[0].i_addr;
2582         ASSERT(qoff_f);
2583
2584         /*
2585          * The logitem format's flag tells us if this was user quotaoff,
2586          * group/project quotaoff or both.
2587          */
2588         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2589                 log->l_quotaoffs_flag |= XFS_DQ_USER;
2590         if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2591                 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2592         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2593                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2594
2595         return (0);
2596 }
2597
2598 /*
2599  * Recover a dquot record
2600  */
2601 STATIC int
2602 xlog_recover_do_dquot_trans(
2603         xlog_t                  *log,
2604         xlog_recover_item_t     *item,
2605         int                     pass)
2606 {
2607         xfs_mount_t             *mp;
2608         xfs_buf_t               *bp;
2609         struct xfs_disk_dquot   *ddq, *recddq;
2610         int                     error;
2611         xfs_dq_logformat_t      *dq_f;
2612         uint                    type;
2613
2614         if (pass == XLOG_RECOVER_PASS1) {
2615                 return 0;
2616         }
2617         mp = log->l_mp;
2618
2619         /*
2620          * Filesystems are required to send in quota flags at mount time.
2621          */
2622         if (mp->m_qflags == 0)
2623                 return (0);
2624
2625         recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
2626
2627         if (item->ri_buf[1].i_addr == NULL) {
2628                 cmn_err(CE_ALERT,
2629                         "XFS: NULL dquot in %s.", __func__);
2630                 return XFS_ERROR(EIO);
2631         }
2632         if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2633                 cmn_err(CE_ALERT,
2634                         "XFS: dquot too small (%d) in %s.",
2635                         item->ri_buf[1].i_len, __func__);
2636                 return XFS_ERROR(EIO);
2637         }
2638
2639         /*
2640          * This type of quotas was turned off, so ignore this record.
2641          */
2642         type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2643         ASSERT(type);
2644         if (log->l_quotaoffs_flag & type)
2645                 return (0);
2646
2647         /*
2648          * At this point we know that quota was _not_ turned off.
2649          * Since the mount flags are not indicating to us otherwise, this
2650          * must mean that quota is on, and the dquot needs to be replayed.
2651          * Remember that we may not have fully recovered the superblock yet,
2652          * so we can't do the usual trick of looking at the SB quota bits.
2653          *
2654          * The other possibility, of course, is that the quota subsystem was
2655          * removed since the last mount - ENOSYS.
2656          */
2657         dq_f = (xfs_dq_logformat_t *)item->ri_buf[0].i_addr;
2658         ASSERT(dq_f);
2659         if ((error = xfs_qm_dqcheck(recddq,
2660                            dq_f->qlf_id,
2661                            0, XFS_QMOPT_DOWARN,
2662                            "xlog_recover_do_dquot_trans (log copy)"))) {
2663                 return XFS_ERROR(EIO);
2664         }
2665         ASSERT(dq_f->qlf_len == 1);
2666
2667         error = xfs_read_buf(mp, mp->m_ddev_targp,
2668                              dq_f->qlf_blkno,
2669                              XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2670                              0, &bp);
2671         if (error) {
2672                 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2673                                   bp, dq_f->qlf_blkno);
2674                 return error;
2675         }
2676         ASSERT(bp);
2677         ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2678
2679         /*
2680          * At least the magic num portion should be on disk because this
2681          * was among a chunk of dquots created earlier, and we did some
2682          * minimal initialization then.
2683          */
2684         if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2685                            "xlog_recover_do_dquot_trans")) {
2686                 xfs_buf_relse(bp);
2687                 return XFS_ERROR(EIO);
2688         }
2689
2690         memcpy(ddq, recddq, item->ri_buf[1].i_len);
2691
2692         ASSERT(dq_f->qlf_size == 2);
2693         ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2694         bp->b_mount = mp;
2695         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2696         xfs_bdwrite(mp, bp);
2697
2698         return (0);
2699 }
2700
2701 /*
2702  * This routine is called to create an in-core extent free intent
2703  * item from the efi format structure which was logged on disk.
2704  * It allocates an in-core efi, copies the extents from the format
2705  * structure into it, and adds the efi to the AIL with the given
2706  * LSN.
2707  */
2708 STATIC int
2709 xlog_recover_do_efi_trans(
2710         xlog_t                  *log,
2711         xlog_recover_item_t     *item,
2712         xfs_lsn_t               lsn,
2713         int                     pass)
2714 {
2715         int                     error;
2716         xfs_mount_t             *mp;
2717         xfs_efi_log_item_t      *efip;
2718         xfs_efi_log_format_t    *efi_formatp;
2719
2720         if (pass == XLOG_RECOVER_PASS1) {
2721                 return 0;
2722         }
2723
2724         efi_formatp = (xfs_efi_log_format_t *)item->ri_buf[0].i_addr;
2725
2726         mp = log->l_mp;
2727         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2728         if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2729                                          &(efip->efi_format)))) {
2730                 xfs_efi_item_free(efip);
2731                 return error;
2732         }
2733         efip->efi_next_extent = efi_formatp->efi_nextents;
2734         efip->efi_flags |= XFS_EFI_COMMITTED;
2735
2736         spin_lock(&log->l_ailp->xa_lock);
2737         /*
2738          * xfs_trans_ail_update() drops the AIL lock.
2739          */
2740         xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
2741         return 0;
2742 }
2743
2744
2745 /*
2746  * This routine is called when an efd format structure is found in
2747  * a committed transaction in the log.  It's purpose is to cancel
2748  * the corresponding efi if it was still in the log.  To do this
2749  * it searches the AIL for the efi with an id equal to that in the
2750  * efd format structure.  If we find it, we remove the efi from the
2751  * AIL and free it.
2752  */
2753 STATIC void
2754 xlog_recover_do_efd_trans(
2755         xlog_t                  *log,
2756         xlog_recover_item_t     *item,
2757         int                     pass)
2758 {
2759         xfs_efd_log_format_t    *efd_formatp;
2760         xfs_efi_log_item_t      *efip = NULL;
2761         xfs_log_item_t          *lip;
2762         __uint64_t              efi_id;
2763         struct xfs_ail_cursor   cur;
2764         struct xfs_ail          *ailp = log->l_ailp;
2765
2766         if (pass == XLOG_RECOVER_PASS1) {
2767                 return;
2768         }
2769
2770         efd_formatp = (xfs_efd_log_format_t *)item->ri_buf[0].i_addr;
2771         ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2772                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2773                (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2774                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2775         efi_id = efd_formatp->efd_efi_id;
2776
2777         /*
2778          * Search for the efi with the id in the efd format structure
2779          * in the AIL.
2780          */
2781         spin_lock(&ailp->xa_lock);
2782         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2783         while (lip != NULL) {
2784                 if (lip->li_type == XFS_LI_EFI) {
2785                         efip = (xfs_efi_log_item_t *)lip;
2786                         if (efip->efi_format.efi_id == efi_id) {
2787                                 /*
2788                                  * xfs_trans_ail_delete() drops the
2789                                  * AIL lock.
2790                                  */
2791                                 xfs_trans_ail_delete(ailp, lip);
2792                                 xfs_efi_item_free(efip);
2793                                 spin_lock(&ailp->xa_lock);
2794                                 break;
2795                         }
2796                 }
2797                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2798         }
2799         xfs_trans_ail_cursor_done(ailp, &cur);
2800         spin_unlock(&ailp->xa_lock);
2801 }
2802
2803 /*
2804  * Perform the transaction
2805  *
2806  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
2807  * EFIs and EFDs get queued up by adding entries into the AIL for them.
2808  */
2809 STATIC int
2810 xlog_recover_do_trans(
2811         xlog_t                  *log,
2812         xlog_recover_t          *trans,
2813         int                     pass)
2814 {
2815         int                     error = 0;
2816         xlog_recover_item_t     *item;
2817
2818         error = xlog_recover_reorder_trans(log, trans, pass);
2819         if (error)
2820                 return error;
2821
2822         list_for_each_entry(item, &trans->r_itemq, ri_list) {
2823                 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2824                 switch (ITEM_TYPE(item)) {
2825                 case XFS_LI_BUF:
2826                         error = xlog_recover_do_buffer_trans(log, item, pass);
2827                         break;
2828                 case XFS_LI_INODE:
2829                         error = xlog_recover_do_inode_trans(log, item, pass);
2830                         break;
2831                 case XFS_LI_EFI:
2832                         error = xlog_recover_do_efi_trans(log, item,
2833                                                           trans->r_lsn, pass);
2834                         break;
2835                 case XFS_LI_EFD:
2836                         xlog_recover_do_efd_trans(log, item, pass);
2837                         error = 0;
2838                         break;
2839                 case XFS_LI_DQUOT:
2840                         error = xlog_recover_do_dquot_trans(log, item, pass);
2841                         break;
2842                 case XFS_LI_QUOTAOFF:
2843                         error = xlog_recover_do_quotaoff_trans(log, item,
2844                                                                pass);
2845                         break;
2846                 default:
2847                         xlog_warn(
2848         "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
2849                         ASSERT(0);
2850                         error = XFS_ERROR(EIO);
2851                         break;
2852                 }
2853
2854                 if (error)
2855                         return error;
2856         }
2857
2858         return 0;
2859 }
2860
2861 /*
2862  * Free up any resources allocated by the transaction
2863  *
2864  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2865  */
2866 STATIC void
2867 xlog_recover_free_trans(
2868         xlog_recover_t          *trans)
2869 {
2870         xlog_recover_item_t     *item, *n;
2871         int                     i;
2872
2873         list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2874                 /* Free the regions in the item. */
2875                 list_del(&item->ri_list);
2876                 for (i = 0; i < item->ri_cnt; i++)
2877                         kmem_free(item->ri_buf[i].i_addr);
2878                 /* Free the item itself */
2879                 kmem_free(item->ri_buf);
2880                 kmem_free(item);
2881         }
2882         /* Free the transaction recover structure */
2883         kmem_free(trans);
2884 }
2885
2886 STATIC int
2887 xlog_recover_commit_trans(
2888         xlog_t                  *log,
2889         xlog_recover_t          *trans,
2890         int                     pass)
2891 {
2892         int                     error;
2893
2894         hlist_del(&trans->r_list);
2895         if ((error = xlog_recover_do_trans(log, trans, pass)))
2896                 return error;
2897         xlog_recover_free_trans(trans);                 /* no error */
2898         return 0;
2899 }
2900
2901 STATIC int
2902 xlog_recover_unmount_trans(
2903         xlog_recover_t          *trans)
2904 {
2905         /* Do nothing now */
2906         xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2907         return 0;
2908 }
2909
2910 /*
2911  * There are two valid states of the r_state field.  0 indicates that the
2912  * transaction structure is in a normal state.  We have either seen the
2913  * start of the transaction or the last operation we added was not a partial
2914  * operation.  If the last operation we added to the transaction was a
2915  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2916  *
2917  * NOTE: skip LRs with 0 data length.
2918  */
2919 STATIC int
2920 xlog_recover_process_data(
2921         xlog_t                  *log,
2922         struct hlist_head       rhash[],
2923         xlog_rec_header_t       *rhead,
2924         xfs_caddr_t             dp,
2925         int                     pass)
2926 {
2927         xfs_caddr_t             lp;
2928         int                     num_logops;
2929         xlog_op_header_t        *ohead;
2930         xlog_recover_t          *trans;
2931         xlog_tid_t              tid;
2932         int                     error;
2933         unsigned long           hash;
2934         uint                    flags;
2935
2936         lp = dp + be32_to_cpu(rhead->h_len);
2937         num_logops = be32_to_cpu(rhead->h_num_logops);
2938
2939         /* check the log format matches our own - else we can't recover */
2940         if (xlog_header_check_recover(log->l_mp, rhead))
2941                 return (XFS_ERROR(EIO));
2942
2943         while ((dp < lp) && num_logops) {
2944                 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2945                 ohead = (xlog_op_header_t *)dp;
2946                 dp += sizeof(xlog_op_header_t);
2947                 if (ohead->oh_clientid != XFS_TRANSACTION &&
2948                     ohead->oh_clientid != XFS_LOG) {
2949                         xlog_warn(
2950                 "XFS: xlog_recover_process_data: bad clientid");
2951                         ASSERT(0);
2952                         return (XFS_ERROR(EIO));
2953                 }
2954                 tid = be32_to_cpu(ohead->oh_tid);
2955                 hash = XLOG_RHASH(tid);
2956                 trans = xlog_recover_find_tid(&rhash[hash], tid);
2957                 if (trans == NULL) {               /* not found; add new tid */
2958                         if (ohead->oh_flags & XLOG_START_TRANS)
2959                                 xlog_recover_new_tid(&rhash[hash], tid,
2960                                         be64_to_cpu(rhead->h_lsn));
2961                 } else {
2962                         if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2963                                 xlog_warn(
2964                         "XFS: xlog_recover_process_data: bad length");
2965                                 WARN_ON(1);
2966                                 return (XFS_ERROR(EIO));
2967                         }
2968                         flags = ohead->oh_flags & ~XLOG_END_TRANS;
2969                         if (flags & XLOG_WAS_CONT_TRANS)
2970                                 flags &= ~XLOG_CONTINUE_TRANS;
2971                         switch (flags) {
2972                         case XLOG_COMMIT_TRANS:
2973                                 error = xlog_recover_commit_trans(log,
2974                                                                 trans, pass);
2975                                 break;
2976                         case XLOG_UNMOUNT_TRANS:
2977                                 error = xlog_recover_unmount_trans(trans);
2978                                 break;
2979                         case XLOG_WAS_CONT_TRANS:
2980                                 error = xlog_recover_add_to_cont_trans(log,
2981                                                 trans, dp,
2982                                                 be32_to_cpu(ohead->oh_len));
2983                                 break;
2984                         case XLOG_START_TRANS:
2985                                 xlog_warn(
2986                         "XFS: xlog_recover_process_data: bad transaction");
2987                                 ASSERT(0);
2988                                 error = XFS_ERROR(EIO);
2989                                 break;
2990                         case 0:
2991                         case XLOG_CONTINUE_TRANS:
2992                                 error = xlog_recover_add_to_trans(log, trans,
2993                                                 dp, be32_to_cpu(ohead->oh_len));
2994                                 break;
2995                         default:
2996                                 xlog_warn(
2997                         "XFS: xlog_recover_process_data: bad flag");
2998                                 ASSERT(0);
2999                                 error = XFS_ERROR(EIO);
3000                                 break;
3001                         }
3002                         if (error)
3003                                 return error;
3004                 }
3005                 dp += be32_to_cpu(ohead->oh_len);
3006                 num_logops--;
3007         }
3008         return 0;
3009 }
3010
3011 /*
3012  * Process an extent free intent item that was recovered from
3013  * the log.  We need to free the extents that it describes.
3014  */
3015 STATIC int
3016 xlog_recover_process_efi(
3017         xfs_mount_t             *mp,
3018         xfs_efi_log_item_t      *efip)
3019 {
3020         xfs_efd_log_item_t      *efdp;
3021         xfs_trans_t             *tp;
3022         int                     i;
3023         int                     error = 0;
3024         xfs_extent_t            *extp;
3025         xfs_fsblock_t           startblock_fsb;
3026
3027         ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
3028
3029         /*
3030          * First check the validity of the extents described by the
3031          * EFI.  If any are bad, then assume that all are bad and
3032          * just toss the EFI.
3033          */
3034         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3035                 extp = &(efip->efi_format.efi_extents[i]);
3036                 startblock_fsb = XFS_BB_TO_FSB(mp,
3037                                    XFS_FSB_TO_DADDR(mp, extp->ext_start));
3038                 if ((startblock_fsb == 0) ||
3039                     (extp->ext_len == 0) ||
3040                     (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3041                     (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3042                         /*
3043                          * This will pull the EFI from the AIL and
3044                          * free the memory associated with it.
3045                          */
3046                         xfs_efi_release(efip, efip->efi_format.efi_nextents);
3047                         return XFS_ERROR(EIO);
3048                 }
3049         }
3050
3051         tp = xfs_trans_alloc(mp, 0);
3052         error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3053         if (error)
3054                 goto abort_error;
3055         efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3056
3057         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3058                 extp = &(efip->efi_format.efi_extents[i]);
3059                 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3060                 if (error)
3061                         goto abort_error;
3062                 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3063                                          extp->ext_len);
3064         }
3065
3066         efip->efi_flags |= XFS_EFI_RECOVERED;
3067         error = xfs_trans_commit(tp, 0);
3068         return error;
3069
3070 abort_error:
3071         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3072         return error;
3073 }
3074
3075 /*
3076  * When this is called, all of the EFIs which did not have
3077  * corresponding EFDs should be in the AIL.  What we do now
3078  * is free the extents associated with each one.
3079  *
3080  * Since we process the EFIs in normal transactions, they
3081  * will be removed at some point after the commit.  This prevents
3082  * us from just walking down the list processing each one.
3083  * We'll use a flag in the EFI to skip those that we've already
3084  * processed and use the AIL iteration mechanism's generation
3085  * count to try to speed this up at least a bit.
3086  *
3087  * When we start, we know that the EFIs are the only things in
3088  * the AIL.  As we process them, however, other items are added
3089  * to the AIL.  Since everything added to the AIL must come after
3090  * everything already in the AIL, we stop processing as soon as
3091  * we see something other than an EFI in the AIL.
3092  */
3093 STATIC int
3094 xlog_recover_process_efis(
3095         xlog_t                  *log)
3096 {
3097         xfs_log_item_t          *lip;
3098         xfs_efi_log_item_t      *efip;
3099         int                     error = 0;
3100         struct xfs_ail_cursor   cur;
3101         struct xfs_ail          *ailp;
3102
3103         ailp = log->l_ailp;
3104         spin_lock(&ailp->xa_lock);
3105         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3106         while (lip != NULL) {
3107                 /*
3108                  * We're done when we see something other than an EFI.
3109                  * There should be no EFIs left in the AIL now.
3110                  */
3111                 if (lip->li_type != XFS_LI_EFI) {
3112 #ifdef DEBUG
3113                         for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3114                                 ASSERT(lip->li_type != XFS_LI_EFI);
3115 #endif
3116                         break;
3117                 }
3118
3119                 /*
3120                  * Skip EFIs that we've already processed.
3121                  */
3122                 efip = (xfs_efi_log_item_t *)lip;
3123                 if (efip->efi_flags & XFS_EFI_RECOVERED) {
3124                         lip = xfs_trans_ail_cursor_next(ailp, &cur);
3125                         continue;
3126                 }
3127
3128                 spin_unlock(&ailp->xa_lock);
3129                 error = xlog_recover_process_efi(log->l_mp, efip);
3130                 spin_lock(&ailp->xa_lock);
3131                 if (error)
3132                         goto out;
3133                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3134         }
3135 out:
3136         xfs_trans_ail_cursor_done(ailp, &cur);
3137         spin_unlock(&ailp->xa_lock);
3138         return error;
3139 }
3140
3141 /*
3142  * This routine performs a transaction to null out a bad inode pointer
3143  * in an agi unlinked inode hash bucket.
3144  */
3145 STATIC void
3146 xlog_recover_clear_agi_bucket(
3147         xfs_mount_t     *mp,
3148         xfs_agnumber_t  agno,
3149         int             bucket)
3150 {
3151         xfs_trans_t     *tp;
3152         xfs_agi_t       *agi;
3153         xfs_buf_t       *agibp;
3154         int             offset;
3155         int             error;
3156
3157         tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3158         error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3159                                   0, 0, 0);
3160         if (error)
3161                 goto out_abort;
3162
3163         error = xfs_read_agi(mp, tp, agno, &agibp);
3164         if (error)
3165                 goto out_abort;
3166
3167         agi = XFS_BUF_TO_AGI(agibp);
3168         agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3169         offset = offsetof(xfs_agi_t, agi_unlinked) +
3170                  (sizeof(xfs_agino_t) * bucket);
3171         xfs_trans_log_buf(tp, agibp, offset,
3172                           (offset + sizeof(xfs_agino_t) - 1));
3173
3174         error = xfs_trans_commit(tp, 0);
3175         if (error)
3176                 goto out_error;
3177         return;
3178
3179 out_abort:
3180         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3181 out_error:
3182         xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: "
3183                         "failed to clear agi %d. Continuing.", agno);
3184         return;
3185 }
3186
3187 STATIC xfs_agino_t
3188 xlog_recover_process_one_iunlink(
3189         struct xfs_mount                *mp,
3190         xfs_agnumber_t                  agno,
3191         xfs_agino_t                     agino,
3192         int                             bucket)
3193 {
3194         struct xfs_buf                  *ibp;
3195         struct xfs_dinode               *dip;
3196         struct xfs_inode                *ip;
3197         xfs_ino_t                       ino;
3198         int                             error;
3199
3200         ino = XFS_AGINO_TO_INO(mp, agno, agino);
3201         error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0);
3202         if (error)
3203                 goto fail;
3204
3205         /*
3206          * Get the on disk inode to find the next inode in the bucket.
3207          */
3208         error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
3209         if (error)
3210                 goto fail_iput;
3211
3212         ASSERT(ip->i_d.di_nlink == 0);
3213         ASSERT(ip->i_d.di_mode != 0);
3214
3215         /* setup for the next pass */
3216         agino = be32_to_cpu(dip->di_next_unlinked);
3217         xfs_buf_relse(ibp);
3218
3219         /*
3220          * Prevent any DMAPI event from being sent when the reference on
3221          * the inode is dropped.
3222          */
3223         ip->i_d.di_dmevmask = 0;
3224
3225         IRELE(ip);
3226         return agino;
3227
3228  fail_iput:
3229         IRELE(ip);
3230  fail:
3231         /*
3232          * We can't read in the inode this bucket points to, or this inode
3233          * is messed up.  Just ditch this bucket of inodes.  We will lose
3234          * some inodes and space, but at least we won't hang.
3235          *
3236          * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3237          * clear the inode pointer in the bucket.
3238          */
3239         xlog_recover_clear_agi_bucket(mp, agno, bucket);
3240         return NULLAGINO;
3241 }
3242
3243 /*
3244  * xlog_iunlink_recover
3245  *
3246  * This is called during recovery to process any inodes which
3247  * we unlinked but not freed when the system crashed.  These
3248  * inodes will be on the lists in the AGI blocks.  What we do
3249  * here is scan all the AGIs and fully truncate and free any
3250  * inodes found on the lists.  Each inode is removed from the
3251  * lists when it has been fully truncated and is freed.  The
3252  * freeing of the inode and its removal from the list must be
3253  * atomic.
3254  */
3255 STATIC void
3256 xlog_recover_process_iunlinks(
3257         xlog_t          *log)
3258 {
3259         xfs_mount_t     *mp;
3260         xfs_agnumber_t  agno;
3261         xfs_agi_t       *agi;
3262         xfs_buf_t       *agibp;
3263         xfs_agino_t     agino;
3264         int             bucket;
3265         int             error;
3266         uint            mp_dmevmask;
3267
3268         mp = log->l_mp;
3269
3270         /*
3271          * Prevent any DMAPI event from being sent while in this function.
3272          */
3273         mp_dmevmask = mp->m_dmevmask;
3274         mp->m_dmevmask = 0;
3275
3276         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3277                 /*
3278                  * Find the agi for this ag.
3279                  */
3280                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3281                 if (error) {
3282                         /*
3283                          * AGI is b0rked. Don't process it.
3284                          *
3285                          * We should probably mark the filesystem as corrupt
3286                          * after we've recovered all the ag's we can....
3287                          */
3288                         continue;
3289                 }
3290                 agi = XFS_BUF_TO_AGI(agibp);
3291
3292                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3293                         agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3294                         while (agino != NULLAGINO) {
3295                                 /*
3296                                  * Release the agi buffer so that it can
3297                                  * be acquired in the normal course of the
3298                                  * transaction to truncate and free the inode.
3299                                  */
3300                                 xfs_buf_relse(agibp);
3301
3302                                 agino = xlog_recover_process_one_iunlink(mp,
3303                                                         agno, agino, bucket);
3304
3305                                 /*
3306                                  * Reacquire the agibuffer and continue around
3307                                  * the loop. This should never fail as we know
3308                                  * the buffer was good earlier on.
3309                                  */
3310                                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3311                                 ASSERT(error == 0);
3312                                 agi = XFS_BUF_TO_AGI(agibp);
3313                         }
3314                 }
3315
3316                 /*
3317                  * Release the buffer for the current agi so we can
3318                  * go on to the next one.
3319                  */
3320                 xfs_buf_relse(agibp);
3321         }
3322
3323         mp->m_dmevmask = mp_dmevmask;
3324 }
3325
3326
3327 #ifdef DEBUG
3328 STATIC void
3329 xlog_pack_data_checksum(
3330         xlog_t          *log,
3331         xlog_in_core_t  *iclog,
3332         int             size)
3333 {
3334         int             i;
3335         __be32          *up;
3336         uint            chksum = 0;
3337
3338         up = (__be32 *)iclog->ic_datap;
3339         /* divide length by 4 to get # words */
3340         for (i = 0; i < (size >> 2); i++) {
3341                 chksum ^= be32_to_cpu(*up);
3342                 up++;
3343         }
3344         iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3345 }
3346 #else
3347 #define xlog_pack_data_checksum(log, iclog, size)
3348 #endif
3349
3350 /*
3351  * Stamp cycle number in every block
3352  */
3353 void
3354 xlog_pack_data(
3355         xlog_t                  *log,
3356         xlog_in_core_t          *iclog,
3357         int                     roundoff)
3358 {
3359         int                     i, j, k;
3360         int                     size = iclog->ic_offset + roundoff;
3361         __be32                  cycle_lsn;
3362         xfs_caddr_t             dp;
3363
3364         xlog_pack_data_checksum(log, iclog, size);
3365
3366         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3367
3368         dp = iclog->ic_datap;
3369         for (i = 0; i < BTOBB(size) &&
3370                 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3371                 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3372                 *(__be32 *)dp = cycle_lsn;
3373                 dp += BBSIZE;
3374         }
3375
3376         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3377                 xlog_in_core_2_t *xhdr = iclog->ic_data;
3378
3379                 for ( ; i < BTOBB(size); i++) {
3380                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3381                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3382                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3383                         *(__be32 *)dp = cycle_lsn;
3384                         dp += BBSIZE;
3385                 }
3386
3387                 for (i = 1; i < log->l_iclog_heads; i++) {
3388                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3389                 }
3390         }
3391 }
3392
3393 STATIC void
3394 xlog_unpack_data(
3395         xlog_rec_header_t       *rhead,
3396         xfs_caddr_t             dp,
3397         xlog_t                  *log)
3398 {
3399         int                     i, j, k;
3400
3401         for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3402                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3403                 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3404                 dp += BBSIZE;
3405         }
3406
3407         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3408                 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3409                 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3410                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3411                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3412                         *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3413                         dp += BBSIZE;
3414                 }
3415         }
3416 }
3417
3418 STATIC int
3419 xlog_valid_rec_header(
3420         xlog_t                  *log,
3421         xlog_rec_header_t       *rhead,
3422         xfs_daddr_t             blkno)
3423 {
3424         int                     hlen;
3425
3426         if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
3427                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3428                                 XFS_ERRLEVEL_LOW, log->l_mp);
3429                 return XFS_ERROR(EFSCORRUPTED);
3430         }
3431         if (unlikely(
3432             (!rhead->h_version ||
3433             (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3434                 xlog_warn("XFS: %s: unrecognised log version (%d).",
3435                         __func__, be32_to_cpu(rhead->h_version));
3436                 return XFS_ERROR(EIO);
3437         }
3438
3439         /* LR body must have data or it wouldn't have been written */
3440         hlen = be32_to_cpu(rhead->h_len);
3441         if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
3442                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3443                                 XFS_ERRLEVEL_LOW, log->l_mp);
3444                 return XFS_ERROR(EFSCORRUPTED);
3445         }
3446         if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
3447                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3448                                 XFS_ERRLEVEL_LOW, log->l_mp);
3449                 return XFS_ERROR(EFSCORRUPTED);
3450         }
3451         return 0;
3452 }
3453
3454 /*
3455  * Read the log from tail to head and process the log records found.
3456  * Handle the two cases where the tail and head are in the same cycle
3457  * and where the active portion of the log wraps around the end of
3458  * the physical log separately.  The pass parameter is passed through
3459  * to the routines called to process the data and is not looked at
3460  * here.
3461  */
3462 STATIC int
3463 xlog_do_recovery_pass(
3464         xlog_t                  *log,
3465         xfs_daddr_t             head_blk,
3466         xfs_daddr_t             tail_blk,
3467         int                     pass)
3468 {
3469         xlog_rec_header_t       *rhead;
3470         xfs_daddr_t             blk_no;
3471         xfs_caddr_t             offset;
3472         xfs_buf_t               *hbp, *dbp;
3473         int                     error = 0, h_size;
3474         int                     bblks, split_bblks;
3475         int                     hblks, split_hblks, wrapped_hblks;
3476         struct hlist_head       rhash[XLOG_RHASH_SIZE];
3477
3478         ASSERT(head_blk != tail_blk);
3479
3480         /*
3481          * Read the header of the tail block and get the iclog buffer size from
3482          * h_size.  Use this to tell how many sectors make up the log header.
3483          */
3484         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3485                 /*
3486                  * When using variable length iclogs, read first sector of
3487                  * iclog header and extract the header size from it.  Get a
3488                  * new hbp that is the correct size.
3489                  */
3490                 hbp = xlog_get_bp(log, 1);
3491                 if (!hbp)
3492                         return ENOMEM;
3493
3494                 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3495                 if (error)
3496                         goto bread_err1;
3497
3498                 rhead = (xlog_rec_header_t *)offset;
3499                 error = xlog_valid_rec_header(log, rhead, tail_blk);
3500                 if (error)
3501                         goto bread_err1;
3502                 h_size = be32_to_cpu(rhead->h_size);
3503                 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3504                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3505                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3506                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
3507                                 hblks++;
3508                         xlog_put_bp(hbp);
3509                         hbp = xlog_get_bp(log, hblks);
3510                 } else {
3511                         hblks = 1;
3512                 }
3513         } else {
3514                 ASSERT(log->l_sectBBsize == 1);
3515                 hblks = 1;
3516                 hbp = xlog_get_bp(log, 1);
3517                 h_size = XLOG_BIG_RECORD_BSIZE;
3518         }
3519
3520         if (!hbp)
3521                 return ENOMEM;
3522         dbp = xlog_get_bp(log, BTOBB(h_size));
3523         if (!dbp) {
3524                 xlog_put_bp(hbp);
3525                 return ENOMEM;
3526         }
3527
3528         memset(rhash, 0, sizeof(rhash));
3529         if (tail_blk <= head_blk) {
3530                 for (blk_no = tail_blk; blk_no < head_blk; ) {
3531                         error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3532                         if (error)
3533                                 goto bread_err2;
3534
3535                         rhead = (xlog_rec_header_t *)offset;
3536                         error = xlog_valid_rec_header(log, rhead, blk_no);
3537                         if (error)
3538                                 goto bread_err2;
3539
3540                         /* blocks in data section */
3541                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3542                         error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3543                                            &offset);
3544                         if (error)
3545                                 goto bread_err2;
3546
3547                         xlog_unpack_data(rhead, offset, log);
3548                         if ((error = xlog_recover_process_data(log,
3549                                                 rhash, rhead, offset, pass)))
3550                                 goto bread_err2;
3551                         blk_no += bblks + hblks;
3552                 }
3553         } else {
3554                 /*
3555                  * Perform recovery around the end of the physical log.
3556                  * When the head is not on the same cycle number as the tail,
3557                  * we can't do a sequential recovery as above.
3558                  */
3559                 blk_no = tail_blk;
3560                 while (blk_no < log->l_logBBsize) {
3561                         /*
3562                          * Check for header wrapping around physical end-of-log
3563                          */
3564                         offset = XFS_BUF_PTR(hbp);
3565                         split_hblks = 0;
3566                         wrapped_hblks = 0;
3567                         if (blk_no + hblks <= log->l_logBBsize) {
3568                                 /* Read header in one read */
3569                                 error = xlog_bread(log, blk_no, hblks, hbp,
3570                                                    &offset);
3571                                 if (error)
3572                                         goto bread_err2;
3573                         } else {
3574                                 /* This LR is split across physical log end */
3575                                 if (blk_no != log->l_logBBsize) {
3576                                         /* some data before physical log end */
3577                                         ASSERT(blk_no <= INT_MAX);
3578                                         split_hblks = log->l_logBBsize - (int)blk_no;
3579                                         ASSERT(split_hblks > 0);
3580                                         error = xlog_bread(log, blk_no,
3581                                                            split_hblks, hbp,
3582                                                            &offset);
3583                                         if (error)
3584                                                 goto bread_err2;
3585                                 }
3586
3587                                 /*
3588                                  * Note: this black magic still works with
3589                                  * large sector sizes (non-512) only because:
3590                                  * - we increased the buffer size originally
3591                                  *   by 1 sector giving us enough extra space
3592                                  *   for the second read;
3593                                  * - the log start is guaranteed to be sector
3594                                  *   aligned;
3595                                  * - we read the log end (LR header start)
3596                                  *   _first_, then the log start (LR header end)
3597                                  *   - order is important.
3598                                  */
3599                                 wrapped_hblks = hblks - split_hblks;
3600                                 error = XFS_BUF_SET_PTR(hbp,
3601                                                 offset + BBTOB(split_hblks),
3602                                                 BBTOB(hblks - split_hblks));
3603                                 if (error)
3604                                         goto bread_err2;
3605
3606                                 error = xlog_bread_noalign(log, 0,
3607                                                            wrapped_hblks, hbp);
3608                                 if (error)
3609                                         goto bread_err2;
3610
3611                                 error = XFS_BUF_SET_PTR(hbp, offset,
3612                                                         BBTOB(hblks));
3613                                 if (error)
3614                                         goto bread_err2;
3615                         }
3616                         rhead = (xlog_rec_header_t *)offset;
3617                         error = xlog_valid_rec_header(log, rhead,
3618                                                 split_hblks ? blk_no : 0);
3619                         if (error)
3620                                 goto bread_err2;
3621
3622                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3623                         blk_no += hblks;
3624
3625                         /* Read in data for log record */
3626                         if (blk_no + bblks <= log->l_logBBsize) {
3627                                 error = xlog_bread(log, blk_no, bblks, dbp,
3628                                                    &offset);
3629                                 if (error)
3630                                         goto bread_err2;
3631                         } else {
3632                                 /* This log record is split across the
3633                                  * physical end of log */
3634                                 offset = XFS_BUF_PTR(dbp);
3635                                 split_bblks = 0;
3636                                 if (blk_no != log->l_logBBsize) {
3637                                         /* some data is before the physical
3638                                          * end of log */
3639                                         ASSERT(!wrapped_hblks);
3640                                         ASSERT(blk_no <= INT_MAX);
3641                                         split_bblks =
3642                                                 log->l_logBBsize - (int)blk_no;
3643                                         ASSERT(split_bblks > 0);
3644                                         error = xlog_bread(log, blk_no,
3645                                                         split_bblks, dbp,
3646                                                         &offset);
3647                                         if (error)
3648                                                 goto bread_err2;
3649                                 }
3650
3651                                 /*
3652                                  * Note: this black magic still works with
3653                                  * large sector sizes (non-512) only because:
3654                                  * - we increased the buffer size originally
3655                                  *   by 1 sector giving us enough extra space
3656                                  *   for the second read;
3657                                  * - the log start is guaranteed to be sector
3658                                  *   aligned;
3659                                  * - we read the log end (LR header start)
3660                                  *   _first_, then the log start (LR header end)
3661                                  *   - order is important.
3662                                  */
3663                                 error = XFS_BUF_SET_PTR(dbp,
3664                                                 offset + BBTOB(split_bblks),
3665                                                 BBTOB(bblks - split_bblks));
3666                                 if (error)
3667                                         goto bread_err2;
3668
3669                                 error = xlog_bread_noalign(log, wrapped_hblks,
3670                                                 bblks - split_bblks,
3671                                                 dbp);
3672                                 if (error)
3673                                         goto bread_err2;
3674
3675                                 error = XFS_BUF_SET_PTR(dbp, offset, h_size);
3676                                 if (error)
3677                                         goto bread_err2;
3678                         }
3679                         xlog_unpack_data(rhead, offset, log);
3680                         if ((error = xlog_recover_process_data(log, rhash,
3681                                                         rhead, offset, pass)))
3682                                 goto bread_err2;
3683                         blk_no += bblks;
3684                 }
3685
3686                 ASSERT(blk_no >= log->l_logBBsize);
3687                 blk_no -= log->l_logBBsize;
3688
3689                 /* read first part of physical log */
3690                 while (blk_no < head_blk) {
3691                         error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3692                         if (error)
3693                                 goto bread_err2;
3694
3695                         rhead = (xlog_rec_header_t *)offset;
3696                         error = xlog_valid_rec_header(log, rhead, blk_no);
3697                         if (error)
3698                                 goto bread_err2;
3699
3700                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3701                         error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3702                                            &offset);
3703                         if (error)
3704                                 goto bread_err2;
3705
3706                         xlog_unpack_data(rhead, offset, log);
3707                         if ((error = xlog_recover_process_data(log, rhash,
3708                                                         rhead, offset, pass)))
3709                                 goto bread_err2;
3710                         blk_no += bblks + hblks;
3711                 }
3712         }
3713
3714  bread_err2:
3715         xlog_put_bp(dbp);
3716  bread_err1:
3717         xlog_put_bp(hbp);
3718         return error;
3719 }
3720
3721 /*
3722  * Do the recovery of the log.  We actually do this in two phases.
3723  * The two passes are necessary in order to implement the function
3724  * of cancelling a record written into the log.  The first pass
3725  * determines those things which have been cancelled, and the
3726  * second pass replays log items normally except for those which
3727  * have been cancelled.  The handling of the replay and cancellations
3728  * takes place in the log item type specific routines.
3729  *
3730  * The table of items which have cancel records in the log is allocated
3731  * and freed at this level, since only here do we know when all of
3732  * the log recovery has been completed.
3733  */
3734 STATIC int
3735 xlog_do_log_recovery(
3736         xlog_t          *log,
3737         xfs_daddr_t     head_blk,
3738         xfs_daddr_t     tail_blk)
3739 {
3740         int             error;
3741
3742         ASSERT(head_blk != tail_blk);
3743
3744         /*
3745          * First do a pass to find all of the cancelled buf log items.
3746          * Store them in the buf_cancel_table for use in the second pass.
3747          */
3748         log->l_buf_cancel_table =
3749                 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3750                                                  sizeof(xfs_buf_cancel_t*),
3751                                                  KM_SLEEP);
3752         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3753                                       XLOG_RECOVER_PASS1);
3754         if (error != 0) {
3755                 kmem_free(log->l_buf_cancel_table);
3756                 log->l_buf_cancel_table = NULL;
3757                 return error;
3758         }
3759         /*
3760          * Then do a second pass to actually recover the items in the log.
3761          * When it is complete free the table of buf cancel items.
3762          */
3763         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3764                                       XLOG_RECOVER_PASS2);
3765 #ifdef DEBUG
3766         if (!error) {
3767                 int     i;
3768
3769                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3770                         ASSERT(log->l_buf_cancel_table[i] == NULL);
3771         }
3772 #endif  /* DEBUG */
3773
3774         kmem_free(log->l_buf_cancel_table);
3775         log->l_buf_cancel_table = NULL;
3776
3777         return error;
3778 }
3779
3780 /*
3781  * Do the actual recovery
3782  */
3783 STATIC int
3784 xlog_do_recover(
3785         xlog_t          *log,
3786         xfs_daddr_t     head_blk,
3787         xfs_daddr_t     tail_blk)
3788 {
3789         int             error;
3790         xfs_buf_t       *bp;
3791         xfs_sb_t        *sbp;
3792
3793         /*
3794          * First replay the images in the log.
3795          */
3796         error = xlog_do_log_recovery(log, head_blk, tail_blk);
3797         if (error) {
3798                 return error;
3799         }
3800
3801         XFS_bflush(log->l_mp->m_ddev_targp);
3802
3803         /*
3804          * If IO errors happened during recovery, bail out.
3805          */
3806         if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3807                 return (EIO);
3808         }
3809
3810         /*
3811          * We now update the tail_lsn since much of the recovery has completed
3812          * and there may be space available to use.  If there were no extent
3813          * or iunlinks, we can free up the entire log and set the tail_lsn to
3814          * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3815          * lsn of the last known good LR on disk.  If there are extent frees
3816          * or iunlinks they will have some entries in the AIL; so we look at
3817          * the AIL to determine how to set the tail_lsn.
3818          */
3819         xlog_assign_tail_lsn(log->l_mp);
3820
3821         /*
3822          * Now that we've finished replaying all buffer and inode
3823          * updates, re-read in the superblock.
3824          */
3825         bp = xfs_getsb(log->l_mp, 0);
3826         XFS_BUF_UNDONE(bp);
3827         ASSERT(!(XFS_BUF_ISWRITE(bp)));
3828         ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
3829         XFS_BUF_READ(bp);
3830         XFS_BUF_UNASYNC(bp);
3831         xfsbdstrat(log->l_mp, bp);
3832         error = xfs_iowait(bp);
3833         if (error) {
3834                 xfs_ioerror_alert("xlog_do_recover",
3835                                   log->l_mp, bp, XFS_BUF_ADDR(bp));
3836                 ASSERT(0);
3837                 xfs_buf_relse(bp);
3838                 return error;
3839         }
3840
3841         /* Convert superblock from on-disk format */
3842         sbp = &log->l_mp->m_sb;
3843         xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3844         ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3845         ASSERT(xfs_sb_good_version(sbp));
3846         xfs_buf_relse(bp);
3847
3848         /* We've re-read the superblock so re-initialize per-cpu counters */
3849         xfs_icsb_reinit_counters(log->l_mp);
3850
3851         xlog_recover_check_summary(log);
3852
3853         /* Normal transactions can now occur */
3854         log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3855         return 0;
3856 }
3857
3858 /*
3859  * Perform recovery and re-initialize some log variables in xlog_find_tail.
3860  *
3861  * Return error or zero.
3862  */
3863 int
3864 xlog_recover(
3865         xlog_t          *log)
3866 {
3867         xfs_daddr_t     head_blk, tail_blk;
3868         int             error;
3869
3870         /* find the tail of the log */
3871         if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3872                 return error;
3873
3874         if (tail_blk != head_blk) {
3875                 /* There used to be a comment here:
3876                  *
3877                  * disallow recovery on read-only mounts.  note -- mount
3878                  * checks for ENOSPC and turns it into an intelligent
3879                  * error message.
3880                  * ...but this is no longer true.  Now, unless you specify
3881                  * NORECOVERY (in which case this function would never be
3882                  * called), we just go ahead and recover.  We do this all
3883                  * under the vfs layer, so we can get away with it unless
3884                  * the device itself is read-only, in which case we fail.
3885                  */
3886                 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3887                         return error;
3888                 }
3889
3890                 cmn_err(CE_NOTE,
3891                         "Starting XFS recovery on filesystem: %s (logdev: %s)",
3892                         log->l_mp->m_fsname, log->l_mp->m_logname ?
3893                         log->l_mp->m_logname : "internal");
3894
3895                 error = xlog_do_recover(log, head_blk, tail_blk);
3896                 log->l_flags |= XLOG_RECOVERY_NEEDED;
3897         }
3898         return error;
3899 }
3900
3901 /*
3902  * In the first part of recovery we replay inodes and buffers and build
3903  * up the list of extent free items which need to be processed.  Here
3904  * we process the extent free items and clean up the on disk unlinked
3905  * inode lists.  This is separated from the first part of recovery so
3906  * that the root and real-time bitmap inodes can be read in from disk in
3907  * between the two stages.  This is necessary so that we can free space
3908  * in the real-time portion of the file system.
3909  */
3910 int
3911 xlog_recover_finish(
3912         xlog_t          *log)
3913 {
3914         /*
3915          * Now we're ready to do the transactions needed for the
3916          * rest of recovery.  Start with completing all the extent
3917          * free intent records and then process the unlinked inode
3918          * lists.  At this point, we essentially run in normal mode
3919          * except that we're still performing recovery actions
3920          * rather than accepting new requests.
3921          */
3922         if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3923                 int     error;
3924                 error = xlog_recover_process_efis(log);
3925                 if (error) {
3926                         cmn_err(CE_ALERT,
3927                                 "Failed to recover EFIs on filesystem: %s",
3928                                 log->l_mp->m_fsname);
3929                         return error;
3930                 }
3931                 /*
3932                  * Sync the log to get all the EFIs out of the AIL.
3933                  * This isn't absolutely necessary, but it helps in
3934                  * case the unlink transactions would have problems
3935                  * pushing the EFIs out of the way.
3936                  */
3937                 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3938
3939                 xlog_recover_process_iunlinks(log);
3940
3941                 xlog_recover_check_summary(log);
3942
3943                 cmn_err(CE_NOTE,
3944                         "Ending XFS recovery on filesystem: %s (logdev: %s)",
3945                         log->l_mp->m_fsname, log->l_mp->m_logname ?
3946                         log->l_mp->m_logname : "internal");
3947                 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3948         } else {
3949                 cmn_err(CE_DEBUG,
3950                         "!Ending clean XFS mount for filesystem: %s\n",
3951                         log->l_mp->m_fsname);
3952         }
3953         return 0;
3954 }
3955
3956
3957 #if defined(DEBUG)
3958 /*
3959  * Read all of the agf and agi counters and check that they
3960  * are consistent with the superblock counters.
3961  */
3962 void
3963 xlog_recover_check_summary(
3964         xlog_t          *log)
3965 {
3966         xfs_mount_t     *mp;
3967         xfs_agf_t       *agfp;
3968         xfs_buf_t       *agfbp;
3969         xfs_buf_t       *agibp;
3970         xfs_agnumber_t  agno;
3971         __uint64_t      freeblks;
3972         __uint64_t      itotal;
3973         __uint64_t      ifree;
3974         int             error;
3975
3976         mp = log->l_mp;
3977
3978         freeblks = 0LL;
3979         itotal = 0LL;
3980         ifree = 0LL;
3981         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3982                 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3983                 if (error) {
3984                         xfs_fs_cmn_err(CE_ALERT, mp,
3985                                         "xlog_recover_check_summary(agf)"
3986                                         "agf read failed agno %d error %d",
3987                                                         agno, error);
3988                 } else {
3989                         agfp = XFS_BUF_TO_AGF(agfbp);
3990                         freeblks += be32_to_cpu(agfp->agf_freeblks) +
3991                                     be32_to_cpu(agfp->agf_flcount);
3992                         xfs_buf_relse(agfbp);
3993                 }
3994
3995                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3996                 if (!error) {
3997                         struct xfs_agi  *agi = XFS_BUF_TO_AGI(agibp);
3998
3999                         itotal += be32_to_cpu(agi->agi_count);
4000                         ifree += be32_to_cpu(agi->agi_freecount);
4001                         xfs_buf_relse(agibp);
4002                 }
4003         }
4004 }
4005 #endif /* DEBUG */