xfs: rename xfs_buf_get_nodaddr to be more appropriate
fs/xfs/xfs_log_recover.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_utils.h"
#include "xfs_trace.h"

STATIC int      xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int      xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
#if defined(DEBUG)
STATIC void     xlog_recover_check_summary(xlog_t *);
#else
#define xlog_recover_check_summary(log)
#endif

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify that the given count of basic blocks is a valid number of
 * blocks to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */

static inline int
xlog_buf_bbcount_valid(
        xlog_t          *log,
        int             bbcount)
{
        return bbcount > 0 && bbcount <= log->l_logBBsize;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
        xlog_t          *log,
        int             nbblks)
{
        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return NULL;
        }

        /*
         * We do log I/O in units of log sectors (a power-of-2
         * multiple of the basic block size), so we round up the
         * requested size to accommodate the basic blocks required
         * for complete log sectors.
         *
         * In addition, the buffer may be used for a non-sector-
         * aligned block offset, in which case an I/O of the
         * requested size could extend beyond the end of the
         * buffer.  If the requested size is only 1 basic block it
         * will never straddle a sector boundary, so this won't be
         * an issue.  Nor will this be a problem if the log I/O is
         * done in basic blocks (sector size 1).  But otherwise we
         * extend the buffer by one extra log sector to ensure
         * there's space to accommodate this possibility.
         */
        if (nbblks > 1 && log->l_sectBBsize > 1)
                nbblks += log->l_sectBBsize;
        nbblks = round_up(nbblks, log->l_sectBBsize);

        return xfs_buf_get_uncached(log->l_mp->m_logdev_targp,
                                        BBTOB(nbblks), 0);
}
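
/*
 * A minimal sketch (not part of the original code) of the sizing rule
 * above: pad any multi-block request by one log sector to cover a
 * possibly unaligned start, then round up to whole sectors.  "sect_bb"
 * stands in for log->l_sectBBsize and is assumed to be a power of two.
 */
#if 0
static int
example_log_buf_bbcount(int nbblks, int sect_bb)
{
        if (nbblks > 1 && sect_bb > 1)
                nbblks += sect_bb;      /* room for an unaligned start */
        return round_up(nbblks, sect_bb);
}
#endif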

STATIC void
xlog_put_bp(
        xfs_buf_t       *bp)
{
        xfs_buf_free(bp);
}

/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        xfs_daddr_t     offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

        ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp));
        return XFS_BUF_PTR(bp) + BBTOB(offset);
}
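
/*
 * Worked example of the xlog_align() arithmetic, with assumed values:
 * if l_sectBBsize is 8 and blk_no is 21, xlog_bread_noalign() rounded
 * the read down to block 16, so block 21 lives 21 & 7 == 5 basic
 * blocks, i.e. BBTOB(5) bytes, into the buffer.  Standalone form:
 */
#if 0
static xfs_daddr_t
example_sector_offset(xfs_daddr_t blk_no, int sect_bb)
{
        /* sect_bb is a power of two, so the mask picks the remainder */
        return blk_no & ((xfs_daddr_t)sect_bb - 1);
}
#endif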


/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return EFSCORRUPTED;
        }

        blk_no = round_down(blk_no, log->l_sectBBsize);
        nbblks = round_up(nbblks, log->l_sectBBsize);

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_READ(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        xfsbdstrat(log->l_mp, bp);
        error = xfs_iowait(bp);
        if (error)
                xfs_ioerror_alert("xlog_bread", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

STATIC int
xlog_bread(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp,
        xfs_caddr_t     *offset)
{
        int             error;

        error = xlog_bread_noalign(log, blk_no, nbblks, bp);
        if (error)
                return error;

        *offset = xlog_align(log, blk_no, nbblks, bp);
        return 0;
}

/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
        xlog_t          *log,
        xfs_daddr_t     blk_no,
        int             nbblks,
        xfs_buf_t       *bp)
{
        int             error;

        if (!xlog_buf_bbcount_valid(log, nbblks)) {
                xlog_warn("XFS: Invalid block length (0x%x) given for buffer",
                        nbblks);
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
                return EFSCORRUPTED;
        }

        blk_no = round_down(blk_no, log->l_sectBBsize);
        nbblks = round_up(nbblks, log->l_sectBBsize);

        ASSERT(nbblks > 0);
        ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));

        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
        XFS_BUF_ZEROFLAGS(bp);
        XFS_BUF_BUSY(bp);
        XFS_BUF_HOLD(bp);
        XFS_BUF_PSEMA(bp, PRIBIO);
        XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
        XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);

        if ((error = xfs_bwrite(log->l_mp, bp)))
                xfs_ioerror_alert("xlog_bwrite", log->l_mp,
                                  bp, XFS_BUF_ADDR(bp));
        return error;
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        cmn_err(CE_DEBUG, "%s:  SB : uuid = %pU, fmt = %d\n",
                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
        cmn_err(CE_DEBUG, "    log : uuid = %pU, fmt = %d\n",
                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

        /*
         * IRIX doesn't write the h_fmt field and leaves it zeroed
         * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
         * a dirty log created in IRIX.
         */
        if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
                xlog_warn(
        "XFS: dirty log written in incompatible format - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(1)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn(
        "XFS: dirty log entry has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_recover(2)",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
{
        ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);

        if (uuid_is_nil(&head->h_fs_uuid)) {
                /*
                 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
                 * h_fs_uuid is nil, we assume this log was last mounted
                 * by IRIX and continue.
                 */
                xlog_warn("XFS: nil uuid in log - IRIX style log");
        } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
                xlog_warn("XFS: log has mismatched uuid - can't recover");
                xlog_header_check_dump(mp, head);
                XFS_ERROR_REPORT("xlog_header_check_mount",
                                 XFS_ERRLEVEL_HIGH, mp);
                return XFS_ERROR(EFSCORRUPTED);
        }
        return 0;
}

STATIC void
xlog_recover_iodone(
        struct xfs_buf  *bp)
{
        if (XFS_BUF_GETERROR(bp)) {
                /*
                 * We're not going to bother about retrying
                 * this during recovery. One strike!
                 */
                xfs_ioerror_alert("xlog_recover_iodone",
                                  bp->b_mount, bp, XFS_BUF_ADDR(bp));
                xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
        }
        bp->b_mount = NULL;
        XFS_BUF_CLR_IODONE_FUNC(bp);
        xfs_biodone(bp);
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
        xlog_t          *log,
        xfs_buf_t       *bp,
        xfs_daddr_t     first_blk,
        xfs_daddr_t     *last_blk,
        uint            cycle)
{
        xfs_caddr_t     offset;
        xfs_daddr_t     mid_blk;
        xfs_daddr_t     end_blk;
        uint            mid_cycle;
        int             error;

        end_blk = *last_blk;
        mid_blk = BLK_AVG(first_blk, end_blk);
        while (mid_blk != first_blk && mid_blk != end_blk) {
                error = xlog_bread(log, mid_blk, 1, bp, &offset);
                if (error)
                        return error;
                mid_cycle = xlog_get_cycle(offset);
                if (mid_cycle == cycle)
                        end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
                else
                        first_blk = mid_blk; /* first_half_cycle == mid_cycle */
                mid_blk = BLK_AVG(first_blk, end_blk);
        }
        ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
               (mid_blk == end_blk && mid_blk-1 == first_blk));

        *last_blk = end_blk;

        return 0;
}
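
/*
 * The loop above is a plain binary search, sketched here in isolation.
 * read_cycle() is a hypothetical stand-in for the xlog_bread() plus
 * xlog_get_cycle() pair; the invariant is that blocks stamped with the
 * target cycle form the tail of the [first, last] range.
 */
#if 0
static xfs_daddr_t
example_find_cycle_start(xfs_daddr_t first, xfs_daddr_t last, uint cycle)
{
        while (last - first > 1) {
                xfs_daddr_t mid = (first + last) / 2;

                if (read_cycle(mid) == cycle)
                        last = mid;     /* target starts at or before mid */
                else
                        first = mid;    /* target starts after mid */
        }
        return last;
}
#endif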

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
        xlog_t          *log,
        xfs_daddr_t     start_blk,
        int             nbblks,
        uint            stop_on_cycle_no,
        xfs_daddr_t     *new_blk)
{
        xfs_daddr_t     i, j;
        uint            cycle;
        xfs_buf_t       *bp;
        xfs_daddr_t     bufblks;
        xfs_caddr_t     buf = NULL;
        int             error = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks we'll be examining.  If that fails,
         * try a smaller size.  We need to be able to read at least
         * a log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(nbblks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < log->l_sectBBsize)
                        return ENOMEM;
        }

        for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
                int     bcount;

                bcount = min(bufblks, (start_blk + nbblks - i));

                error = xlog_bread(log, i, bcount, bp, &buf);
                if (error)
                        goto out;

                for (j = 0; j < bcount; j++) {
                        cycle = xlog_get_cycle(buf);
                        if (cycle == stop_on_cycle_no) {
                                *new_blk = i+j;
                                goto out;
                        }

                        buf += BBSIZE;
                }
        }

        *new_blk = -1;

out:
        xlog_put_bp(bp);
        return error;
}
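
/*
 * The allocate-then-halve pattern used above, in isolation: start with
 * a generous power-of-two request, halve it on failure, and give up
 * once it drops below one log sector.  A sketch reusing this file's
 * xlog_get_bp()/l_sectBBsize names:
 */
#if 0
static xfs_buf_t *
example_greedy_get_bp(xlog_t *log, int nbblks, int *got_bblks)
{
        int             bufblks = 1 << ffs(nbblks);
        xfs_buf_t       *bp;

        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < log->l_sectBBsize)
                        return NULL;    /* callers map this to ENOMEM */
        }
        *got_bblks = bufblks;
        return bp;
}
#endif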

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
        xlog_t                  *log,
        xfs_daddr_t             start_blk,
        xfs_daddr_t             *last_blk,
        int                     extra_bblks)
{
        xfs_daddr_t             i;
        xfs_buf_t               *bp;
        xfs_caddr_t             offset = NULL;
        xlog_rec_header_t       *head = NULL;
        int                     error = 0;
        int                     smallmem = 0;
        int                     num_blks = *last_blk - start_blk;
        int                     xhdrs;

        ASSERT(start_blk != 0 || *last_blk != start_blk);

        if (!(bp = xlog_get_bp(log, num_blks))) {
                if (!(bp = xlog_get_bp(log, 1)))
                        return ENOMEM;
                smallmem = 1;
        } else {
                error = xlog_bread(log, start_blk, num_blks, bp, &offset);
                if (error)
                        goto out;
                offset += ((num_blks - 1) << BBSHIFT);
        }

        for (i = (*last_blk) - 1; i >= 0; i--) {
                if (i < start_blk) {
                        /* valid log record not found */
                        xlog_warn(
                "XFS: Log inconsistent (didn't find previous header)");
                        ASSERT(0);
                        error = XFS_ERROR(EIO);
                        goto out;
                }

                if (smallmem) {
                        error = xlog_bread(log, i, 1, bp, &offset);
                        if (error)
                                goto out;
                }

                head = (xlog_rec_header_t *)offset;

                if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
                        break;

                if (!smallmem)
                        offset -= BBSIZE;
        }

        /*
         * We hit the beginning of the physical log & still no header.  Return
         * to caller.  If caller can handle a return of -1, then this routine
         * will be called again for the end of the physical log.
         */
        if (i == -1) {
                error = -1;
                goto out;
        }

        /*
         * We have the final block of the good log (the first block
         * of the log record _before_ the head).  So we check the uuid.
         */
        if ((error = xlog_header_check_mount(log->l_mp, head)))
                goto out;

        /*
         * We may have found a log record header before we expected one.
         * last_blk will be the 1st block # with a given cycle #.  We may end
         * up reading an entire log record.  In this case, we don't want to
         * reset last_blk.  Only when last_blk points in the middle of a log
         * record do we update last_blk.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                uint    h_size = be32_to_cpu(head->h_size);

                xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
                if (h_size % XLOG_HEADER_CYCLE_SIZE)
                        xhdrs++;
        } else {
                xhdrs = 1;
        }

        if (*last_blk - i + extra_bblks !=
            BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
                *last_blk = i;

out:
        xlog_put_bp(bp);
        return error;
}
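
/*
 * Sketch of the record-size check above: a version 2 log whose h_size
 * spans more than one header sector spreads its cycle data over
 * ceil(h_size / XLOG_HEADER_CYCLE_SIZE) header blocks; everything else
 * uses one.  A standalone form of that rule (hypothetical helper name):
 */
#if 0
static int
example_record_header_blocks(uint h_size, int has_logv2)
{
        if (!has_logv2)
                return 1;
        return (h_size + XLOG_HEADER_CYCLE_SIZE - 1) / XLOG_HEADER_CYCLE_SIZE;
}
#endif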

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number - 1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
        xlog_t          *log,
        xfs_daddr_t     *return_head_blk)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        xfs_daddr_t     new_blk, first_blk, start_blk, last_blk, head_blk;
        int             num_scan_bblks;
        uint            first_half_cycle, last_half_cycle;
        uint            stop_on_cycle;
        int             error, log_bbnum = log->l_logBBsize;

        /* Is the end of the log device zeroed? */
        if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
                *return_head_blk = first_blk;

                /* Is the whole lot zeroed? */
                if (!first_blk) {
                        /* Linux XFS shouldn't generate totally zeroed logs -
                         * mkfs etc write a dummy unmount record to a fresh
                         * log so we can store the uuid in there
                         */
                        xlog_warn("XFS: totally zeroed log");
                }

                return 0;
        } else if (error) {
                xlog_warn("XFS: empty log check failed");
                return error;
        }

        first_blk = 0;                  /* get cycle # of 1st block */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;

        error = xlog_bread(log, 0, 1, bp, &offset);
        if (error)
                goto bp_err;

        first_half_cycle = xlog_get_cycle(offset);

        last_blk = head_blk = log_bbnum - 1;    /* get cycle # of last block */
        error = xlog_bread(log, last_blk, 1, bp, &offset);
        if (error)
                goto bp_err;

        last_half_cycle = xlog_get_cycle(offset);
        ASSERT(last_half_cycle != 0);

        /*
         * If the 1st half cycle number is equal to the last half cycle number,
         * then the entire log is stamped with the same cycle number.  In this
         * case, head_blk can't be set to zero (which makes sense).  The below
         * math doesn't work out properly with head_blk equal to zero.  Instead,
         * we set it to log_bbnum which is an invalid block number, but this
         * value makes the math correct.  If head_blk doesn't change through
         * all the tests below, *head_blk is set to zero at the very end rather
         * than log_bbnum.  In a sense, log_bbnum and zero are the same block
         * in a circular file.
         */
        if (first_half_cycle == last_half_cycle) {
                /*
                 * In this case we believe that the entire log should have
                 * cycle number last_half_cycle.  We need to scan backwards
                 * from the end verifying that there are no holes still
                 * containing last_half_cycle - 1.  If we find such a hole,
                 * then the start of that hole will be the new head.  The
                 * simple case looks like
                 *        x | x ... | x - 1 | x
                 * Another case that fits this picture would be
                 *        x | x + 1 | x ... | x
                 * In this case the head really is somewhere at the end of the
                 * log, as one of the latest writes at the beginning was
                 * incomplete.
                 * One more case is
                 *        x | x + 1 | x ... | x - 1 | x
                 * This is really the combination of the above two cases, and
                 * the head has to end up at the start of the x-1 hole at the
                 * end of the log.
                 *
                 * In the 256k log case, we will read from the beginning to the
                 * end of the log and search for cycle numbers equal to x-1.
                 * We don't worry about the x+1 blocks that we encounter,
                 * because we know that they cannot be the head since the log
                 * started with x.
                 */
                head_blk = log_bbnum;
                stop_on_cycle = last_half_cycle - 1;
        } else {
                /*
                 * In this case we want to find the first block with cycle
                 * number matching last_half_cycle.  We expect the log to be
                 * some variation on
                 *        x + 1 ... | x ... | x
                 * The first block with cycle number x (last_half_cycle) will
                 * be where the new head belongs.  First we do a binary search
                 * for the first occurrence of last_half_cycle.  The binary
                 * search may not be totally accurate, so then we scan back
                 * from there looking for occurrences of last_half_cycle before
                 * us.  If that backwards scan wraps around the beginning of
                 * the log, then we look for occurrences of last_half_cycle - 1
                 * at the end of the log.  The cases we're looking for look
                 * like
                 *                               v binary search stopped here
                 *        x + 1 ... | x | x + 1 | x ... | x
                 *                   ^ but we want to locate this spot
                 * or
                 *        <---------> less than scan distance
                 *        x + 1 ... | x ... | x - 1 | x
                 *                           ^ we want to locate this spot
                 */
                stop_on_cycle = last_half_cycle;
                if ((error = xlog_find_cycle_start(log, bp, first_blk,
                                                &head_blk, last_half_cycle)))
                        goto bp_err;
        }

        /*
         * Now validate the answer.  Scan back some number of maximum possible
         * blocks and make sure each one has the expected cycle number.  The
         * maximum is determined by the total possible amount of buffering
         * in the in-core log.  The following number can be made tighter if
         * we actually look at the block size of the filesystem.
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                /*
                 * We are guaranteed that the entire check can be performed
                 * in one buffer.
                 */
                start_blk = head_blk - num_scan_bblks;
                if ((error = xlog_find_verify_cycle(log,
                                                start_blk, num_scan_bblks,
                                                stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        } else {                /* need to read 2 parts of log */
                /*
                 * We are going to scan backwards in the log in two parts.
                 * First we scan the physical end of the log.  In this part
                 * of the log, we are looking for blocks with cycle number
                 * last_half_cycle - 1.
                 * If we find one, then we know that the log starts there, as
                 * we've found a hole that didn't get written in going around
                 * the end of the physical log.  The simple case for this is
                 *        x + 1 ... | x ... | x - 1 | x
                 *        <---------> less than scan distance
                 * If all of the blocks at the end of the log have cycle number
                 * last_half_cycle, then we check the blocks at the start of
                 * the log looking for occurrences of last_half_cycle.  If we
                 * find one, then our current estimate for the location of the
                 * first occurrence of last_half_cycle is wrong and we move
                 * back to the hole we've found.  This case looks like
                 *        x + 1 ... | x | x + 1 | x ...
                 *                               ^ binary search stopped here
                 * Another case we need to handle that only occurs in 256k
                 * logs is
                 *        x + 1 ... | x ... | x+1 | x ...
                 *                   ^ binary search stops here
                 * In a 256k log, the scan at the end of the log will see the
                 * x + 1 blocks.  We need to skip past those since that is
                 * certainly not the head of the log.  By searching for
                 * last_half_cycle-1 we accomplish that.
                 */
                ASSERT(head_blk <= INT_MAX &&
                        (xfs_daddr_t) num_scan_bblks >= head_blk);
                start_blk = log_bbnum - (num_scan_bblks - head_blk);
                if ((error = xlog_find_verify_cycle(log, start_blk,
                                        num_scan_bblks - (int)head_blk,
                                        (stop_on_cycle - 1), &new_blk)))
                        goto bp_err;
                if (new_blk != -1) {
                        head_blk = new_blk;
                        goto validate_head;
                }

                /*
                 * Scan beginning of log now.  The last part of the physical
                 * log is good.  This scan needs to verify that it doesn't find
                 * the last_half_cycle.
                 */
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_cycle(log,
                                        start_blk, (int)head_blk,
                                        stop_on_cycle, &new_blk)))
                        goto bp_err;
                if (new_blk != -1)
                        head_blk = new_blk;
        }

validate_head:
        /*
         * Now we need to make sure head_blk is not pointing to a block in
         * the middle of a log record.
         */
        num_scan_bblks = XLOG_REC_SHIFT(log);
        if (head_blk >= num_scan_bblks) {
                start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

                /* start ptr at last block ptr before head_blk */
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        error = XFS_ERROR(EIO);
                        goto bp_err;
                } else if (error)
                        goto bp_err;
        } else {
                start_blk = 0;
                ASSERT(head_blk <= INT_MAX);
                if ((error = xlog_find_verify_log_record(log, start_blk,
                                                        &head_blk, 0)) == -1) {
                        /* We hit the beginning of the log during our search */
                        start_blk = log_bbnum - (num_scan_bblks - head_blk);
                        new_blk = log_bbnum;
                        ASSERT(start_blk <= INT_MAX &&
                                (xfs_daddr_t) log_bbnum-start_blk >= 0);
                        ASSERT(head_blk <= INT_MAX);
                        if ((error = xlog_find_verify_log_record(log,
                                                        start_blk, &new_blk,
                                                        (int)head_blk)) == -1) {
                                error = XFS_ERROR(EIO);
                                goto bp_err;
                        } else if (error)
                                goto bp_err;
                        if (new_blk != log_bbnum)
                                head_blk = new_blk;
                } else if (error)
                        goto bp_err;
        }

        xlog_put_bp(bp);
        if (head_blk == log_bbnum)
                *return_head_blk = 0;
        else
                *return_head_blk = head_blk;
        /*
         * When returning here, we have a good block number.  Bad block
         * means that during a previous crash, we didn't have a clean break
         * from cycle number N to cycle number N-1.  In this case, we need
         * to find the first block with cycle number N-1.
         */
        return 0;

 bp_err:
        xlog_put_bp(bp);

        if (error)
            xlog_warn("XFS: failed to find log head");
        return error;
}
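
/*
 * The backwards scans above wrap through the physical end of the log
 * when fewer than num_scan_bblks blocks precede the head.  A sketch of
 * just that start-block computation (assumed standalone form):
 */
#if 0
static xfs_daddr_t
example_wrapped_scan_start(xfs_daddr_t head_blk, int scan_bblks, int log_bbnum)
{
        if (head_blk >= scan_bblks)
                return head_blk - scan_bblks;   /* no wrap needed */
        /* wrap: begin the scan in the tail end of the physical log */
        return log_bbnum - (scan_bblks - head_blk);
}
#endif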

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up the search by using the current head_blk buffer, but
 * it is not available.
 */
STATIC int
xlog_find_tail(
        xlog_t                  *log,
        xfs_daddr_t             *head_blk,
        xfs_daddr_t             *tail_blk)
{
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
        xfs_caddr_t             offset = NULL;
        xfs_buf_t               *bp;
        int                     error, i, found;
        xfs_daddr_t             umount_data_blk;
        xfs_daddr_t             after_umount_blk;
        xfs_lsn_t               tail_lsn;
        int                     hblks;

        found = 0;

        /*
         * Find previous log record
         */
        if ((error = xlog_find_head(log, head_blk)))
                return error;

        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        if (*head_blk == 0) {                           /* special case */
                error = xlog_bread(log, 0, 1, bp, &offset);
                if (error)
                        goto done;

                if (xlog_get_cycle(offset) == 0) {
                        *tail_blk = 0;
                        /* leave all other log inited values alone */
                        goto done;
                }
        }

        /*
         * Search backwards looking for log record header block
         */
        ASSERT(*head_blk < INT_MAX);
        for (i = (int)(*head_blk) - 1; i >= 0; i--) {
                error = xlog_bread(log, i, 1, bp, &offset);
                if (error)
                        goto done;

                if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
                        found = 1;
                        break;
                }
        }
        /*
         * If we haven't found the log record header block, start looking
         * again from the end of the physical log.  XXXmiken: There should be
         * a check here to make sure we didn't search more than N blocks in
         * the previous code.
         */
        if (!found) {
                for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
                        error = xlog_bread(log, i, 1, bp, &offset);
                        if (error)
                                goto done;

                        if (XLOG_HEADER_MAGIC_NUM ==
                            be32_to_cpu(*(__be32 *)offset)) {
                                found = 2;
                                break;
                        }
                }
        }
        if (!found) {
                xlog_warn("XFS: xlog_find_tail: couldn't find sync record");
                ASSERT(0);
                return XFS_ERROR(EIO);
        }

        /* find blk_no of tail of log */
        rhead = (xlog_rec_header_t *)offset;
        *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

        /*
         * Reset log values according to the state of the log when we
         * crashed.  In the case where head_blk == 0, we bump curr_cycle
         * one because the next write starts a new cycle rather than
         * continuing the cycle of the last good log record.  At this
         * point we have guaranteed that all partial log records have been
         * accounted for.  Therefore, we know that the last good log record
         * written was complete and ended exactly on the end boundary
         * of the physical log.
         */
        log->l_prev_block = i;
        log->l_curr_block = (int)*head_blk;
        log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
        if (found == 2)
                log->l_curr_cycle++;
        log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
        log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
        log->l_grant_reserve_cycle = log->l_curr_cycle;
        log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
        log->l_grant_write_cycle = log->l_curr_cycle;
        log->l_grant_write_bytes = BBTOB(log->l_curr_block);

        /*
         * Look for unmount record.  If we find it, then we know there
         * was a clean unmount.  Since 'i' could be the last block in
         * the physical log, we convert to a log block before comparing
         * to the head_blk.
         *
         * Save the current tail lsn to use to pass to
         * xlog_clear_stale_blocks() below.  We won't want to clear the
         * unmount record if there is one, so we pass the lsn of the
         * unmount record rather than the block after it.
         */
        if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
                int     h_size = be32_to_cpu(rhead->h_size);
                int     h_version = be32_to_cpu(rhead->h_version);

                if ((h_version & XLOG_VERSION_2) &&
                    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
                        hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
                        if (h_size % XLOG_HEADER_CYCLE_SIZE)
                                hblks++;
                } else {
                        hblks = 1;
                }
        } else {
                hblks = 1;
        }
        after_umount_blk = (i + hblks + (int)
                BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
        tail_lsn = log->l_tail_lsn;
        if (*head_blk == after_umount_blk &&
            be32_to_cpu(rhead->h_num_logops) == 1) {
                umount_data_blk = (i + hblks) % log->l_logBBsize;
                error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
                if (error)
                        goto done;

                op_head = (xlog_op_header_t *)offset;
                if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
                        /*
                         * Set tail and last sync so that newly written
                         * log records will point recovery to after the
                         * current unmount record.
                         */
                        log->l_tail_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        log->l_last_sync_lsn =
                                xlog_assign_lsn(log->l_curr_cycle,
                                                after_umount_blk);
                        *tail_blk = after_umount_blk;

                        /*
                         * Note that the unmount was clean. If the unmount
                         * was not clean, we need to know this to rebuild the
                         * superblock counters from the perag headers if we
                         * have a filesystem using non-persistent counters.
                         */
                        log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
                }
        }

        /*
         * Make sure that there are no blocks in front of the head
         * with the same cycle number as the head.  This can happen
         * because we allow multiple outstanding log writes concurrently,
         * and the later writes might make it out before earlier ones.
         *
         * We use the lsn from before modifying it so that we'll never
         * overwrite the unmount record after a clean unmount.
         *
         * Do this only if we are going to recover the filesystem
         *
         * NOTE: This used to say "if (!readonly)"
         * However on Linux, we can & do recover a read-only filesystem.
         * We only skip recovery if NORECOVERY is specified on mount,
         * in which case we would not be here.
         *
         * But... if the -device- itself is readonly, just skip this.
         * We can't recover this device anyway, so it won't matter.
         */
        if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
                error = xlog_clear_stale_blocks(log, tail_lsn);

done:
        xlog_put_bp(bp);

        if (error)
                xlog_warn("XFS: failed to locate log tail");
        return error;
}
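
/*
 * An LSN packs a cycle number into the high 32 bits and a basic-block
 * number into the low 32 bits, which is why CYCLE_LSN()/BLOCK_LSN()
 * above can recover the tail position straight from h_tail_lsn.  A
 * sketch of the encoding, matching what xlog_assign_lsn() produces:
 */
#if 0
static xfs_lsn_t
example_assign_lsn(uint cycle, uint block)
{
        return ((xfs_lsn_t)cycle << 32) | block;
}
#endif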

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *      0  => the log is completely written to
 *      -1 => use *blk_no as the first block of the log
 *      >0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
        xlog_t          *log,
        xfs_daddr_t     *blk_no)
{
        xfs_buf_t       *bp;
        xfs_caddr_t     offset;
        uint            first_cycle, last_cycle;
        xfs_daddr_t     new_blk, last_blk, start_blk;
        xfs_daddr_t     num_scan_bblks;
        int             error, log_bbnum = log->l_logBBsize;

        *blk_no = 0;

        /* check totally zeroed log */
        bp = xlog_get_bp(log, 1);
        if (!bp)
                return ENOMEM;
        error = xlog_bread(log, 0, 1, bp, &offset);
        if (error)
                goto bp_err;

        first_cycle = xlog_get_cycle(offset);
        if (first_cycle == 0) {         /* completely zeroed log */
                *blk_no = 0;
                xlog_put_bp(bp);
                return -1;
        }

        /* check partially zeroed log */
        error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
        if (error)
                goto bp_err;

        last_cycle = xlog_get_cycle(offset);
        if (last_cycle != 0) {          /* log completely written to */
                xlog_put_bp(bp);
                return 0;
        } else if (first_cycle != 1) {
                /*
                 * If the cycle of the last block is zero, the cycle of
                 * the first block must be 1. If it's not, maybe we're
                 * not looking at a log... Bail out.
                 */
                xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)");
                return XFS_ERROR(EINVAL);
        }

        /* we have a partially zeroed log */
        last_blk = log_bbnum-1;
        if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
                goto bp_err;

        /*
         * Validate the answer.  Because there is no way to guarantee that
         * the entire log is made up of log records which are the same size,
         * we scan over the defined maximum blocks.  At this point, the maximum
         * is not chosen to mean anything special.   XXXmiken
         */
        num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
        ASSERT(num_scan_bblks <= INT_MAX);

        if (last_blk < num_scan_bblks)
                num_scan_bblks = last_blk;
        start_blk = last_blk - num_scan_bblks;

        /*
         * We search for any instances of cycle number 0 that occur before
         * our current estimate of the head.  What we're trying to detect is
         *        1 ... | 0 | 1 | 0...
         *                       ^ binary search ends here
         */
        if ((error = xlog_find_verify_cycle(log, start_blk,
                                         (int)num_scan_bblks, 0, &new_blk)))
                goto bp_err;
        if (new_blk != -1)
                last_blk = new_blk;

        /*
         * Potentially backup over partial log record write.  We don't need
         * to search the end of the log because we know it is zero.
         */
        if ((error = xlog_find_verify_log_record(log, start_blk,
                                &last_blk, 0)) == -1) {
            error = XFS_ERROR(EIO);
            goto bp_err;
        } else if (error)
            goto bp_err;

        *blk_no = last_blk;
bp_err:
        xlog_put_bp(bp);
        if (error)
                return error;
        return -1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
        xlog_t                  *log,
        xfs_caddr_t             buf,
        int                     cycle,
        int                     block,
        int                     tail_cycle,
        int                     tail_block)
{
        xlog_rec_header_t       *recp = (xlog_rec_header_t *)buf;

        memset(buf, 0, BBSIZE);
        recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
        recp->h_cycle = cpu_to_be32(cycle);
        recp->h_version = cpu_to_be32(
                        xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
        recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
        recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
        recp->h_fmt = cpu_to_be32(XLOG_FMT);
        memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
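
/*
 * Usage sketch for xlog_add_record(): stamp one basic block of a log
 * buffer as an empty record header.  "buf" is assumed to point at
 * BBSIZE writable bytes, as in xlog_write_log_records() below; the
 * cycle/block values are illustrative only.
 */
#if 0
static void
example_stamp_block(xlog_t *log, xfs_caddr_t buf)
{
        /* head at cycle 4, block 100; tail still at cycle 4, block 10 */
        xlog_add_record(log, buf, 4, 100, 4, 10);
}
#endif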

STATIC int
xlog_write_log_records(
        xlog_t          *log,
        int             cycle,
        int             start_block,
        int             blocks,
        int             tail_cycle,
        int             tail_block)
{
        xfs_caddr_t     offset;
        xfs_buf_t       *bp;
        int             balign, ealign;
        int             sectbb = log->l_sectBBsize;
        int             end_block = start_block + blocks;
        int             bufblks;
        int             error = 0;
        int             i, j = 0;

        /*
         * Greedily allocate a buffer big enough to handle the full
         * range of basic blocks to be written.  If that fails, try
         * a smaller size.  We need to be able to write at least a
         * log sector, or we're out of luck.
         */
        bufblks = 1 << ffs(blocks);
        while (!(bp = xlog_get_bp(log, bufblks))) {
                bufblks >>= 1;
                if (bufblks < sectbb)
                        return ENOMEM;
        }

        /* We may need to do a read at the start to fill in part of
         * the buffer in the starting sector not covered by the first
         * write below.
         */
        balign = round_down(start_block, sectbb);
        if (balign != start_block) {
                error = xlog_bread_noalign(log, start_block, 1, bp);
                if (error)
                        goto out_put_bp;

                j = start_block - balign;
        }

        for (i = start_block; i < end_block; i += bufblks) {
                int             bcount, endcount;

                bcount = min(bufblks, end_block - start_block);
                endcount = bcount - j;

                /* We may need to do a read at the end to fill in part of
                 * the buffer in the final sector not covered by the write.
                 * If this is the same sector as the above read, skip it.
                 */
                ealign = round_down(end_block, sectbb);
                if (j == 0 && (start_block + endcount > ealign)) {
                        offset = XFS_BUF_PTR(bp);
                        balign = BBTOB(ealign - start_block);
                        error = XFS_BUF_SET_PTR(bp, offset + balign,
                                                BBTOB(sectbb));
                        if (error)
                                break;

                        error = xlog_bread_noalign(log, ealign, sectbb, bp);
                        if (error)
                                break;

                        error = XFS_BUF_SET_PTR(bp, offset, bufblks);
                        if (error)
                                break;
                }

                offset = xlog_align(log, start_block, endcount, bp);
                for (; j < endcount; j++) {
                        xlog_add_record(log, offset, cycle, i+j,
                                        tail_cycle, tail_block);
                        offset += BBSIZE;
                }
                error = xlog_bwrite(log, start_block, endcount, bp);
                if (error)
                        break;
                start_block += endcount;
                j = 0;
        }

 out_put_bp:
        xlog_put_bp(bp);
        return error;
}
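
/*
 * The alignment dance above in miniature: when the write range does not
 * start or end on a log-sector boundary, the partial sectors are read
 * first so rewriting them preserves bytes that are not ours.  A pure
 * arithmetic sketch of the two fill sizes (assumed standalone form;
 * sect_bb is a power of two):
 */
#if 0
static void
example_partial_sectors(int start_block, int blocks, int sect_bb,
                        int *head_fill, int *tail_fill)
{
        int     end_block = start_block + blocks;

        /* blocks of the first sector that precede our range */
        *head_fill = start_block - round_down(start_block, sect_bb);
        /* blocks of the last sector that follow our range */
        *tail_fill = round_up(end_block, sect_bb) - end_block;
}
#endif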

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
        xlog_t          *log,
        xfs_lsn_t       tail_lsn)
{
        int             tail_cycle, head_cycle;
        int             tail_block, head_block;
        int             tail_distance, max_distance;
        int             distance;
        int             error;

        tail_cycle = CYCLE_LSN(tail_lsn);
        tail_block = BLOCK_LSN(tail_lsn);
        head_cycle = log->l_curr_cycle;
        head_block = log->l_curr_block;

        /*
         * Figure out the distance between the new head of the log
         * and the tail.  We want to write over any blocks beyond the
         * head that we may have written just before the crash, but
         * we don't want to overwrite the tail of the log.
         */
        if (head_cycle == tail_cycle) {
                /*
                 * The tail is behind the head in the physical log,
                 * so the distance from the head to the tail is the
                 * distance from the head to the end of the log plus
                 * the distance from the beginning of the log to the
                 * tail.
                 */
                if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block + (log->l_logBBsize - head_block);
        } else {
                /*
                 * The head is behind the tail in the physical log,
                 * so the distance from the head to the tail is just
                 * the tail block minus the head block.
                 */
                if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
                        XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
                                         XFS_ERRLEVEL_LOW, log->l_mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                tail_distance = tail_block - head_block;
        }

        /*
         * If the head is right up against the tail, we can't clear
         * anything.
         */
        if (tail_distance <= 0) {
                ASSERT(tail_distance == 0);
                return 0;
        }

        max_distance = XLOG_TOTAL_REC_SHIFT(log);
        /*
         * Take the smaller of the maximum amount of outstanding I/O
         * we could have and the distance to the tail to clear out.
         * We take the smaller so that we don't overwrite the tail and
         * we don't waste all day writing from the head to the tail
         * for no reason.
         */
        max_distance = MIN(max_distance, tail_distance);

        if ((head_block + max_distance) <= log->l_logBBsize) {
                /*
                 * We can stomp all the blocks we need to without
                 * wrapping around the end of the log.  Just do it
                 * in a single write.  Use the cycle number of the
                 * current cycle minus one so that the log will look like:
                 *     n ... | n - 1 ...
                 */
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, max_distance, tail_cycle,
                                tail_block);
                if (error)
                        return error;
        } else {
                /*
                 * We need to wrap around the end of the physical log in
                 * order to clear all the blocks.  Do it in two separate
                 * I/Os.  The first write should be from the head to the
                 * end of the physical log, and it should use the current
                 * cycle number minus one just like above.
                 */
                distance = log->l_logBBsize - head_block;
                error = xlog_write_log_records(log, (head_cycle - 1),
                                head_block, distance, tail_cycle,
                                tail_block);

                if (error)
                        return error;

                /*
                 * Now write the blocks at the start of the physical log.
                 * This writes the remainder of the blocks we want to clear.
                 * It uses the current cycle number since we're now on the
                 * same cycle as the head so that we get:
                 *    n ... n ... | n - 1 ...
                 *    ^^^^^ blocks we're writing
                 */
                distance = max_distance - (log->l_logBBsize - head_block);
                error = xlog_write_log_records(log, head_cycle, 0, distance,
                                tail_cycle, tail_block);
                if (error)
                        return error;
        }

        return 0;
}
1386
1387 /******************************************************************************
1388  *
1389  *              Log recover routines
1390  *
1391  ******************************************************************************
1392  */
1393
1394 STATIC xlog_recover_t *
1395 xlog_recover_find_tid(
1396         struct hlist_head       *head,
1397         xlog_tid_t              tid)
1398 {
1399         xlog_recover_t          *trans;
1400         struct hlist_node       *n;
1401
1402         hlist_for_each_entry(trans, n, head, r_list) {
1403                 if (trans->r_log_tid == tid)
1404                         return trans;
1405         }
1406         return NULL;
1407 }
1408
1409 STATIC void
1410 xlog_recover_new_tid(
1411         struct hlist_head       *head,
1412         xlog_tid_t              tid,
1413         xfs_lsn_t               lsn)
1414 {
1415         xlog_recover_t          *trans;
1416
1417         trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1418         trans->r_log_tid   = tid;
1419         trans->r_lsn       = lsn;
1420         INIT_LIST_HEAD(&trans->r_itemq);
1421
1422         INIT_HLIST_NODE(&trans->r_list);
1423         hlist_add_head(&trans->r_list, head);
1424 }
1425
1426 STATIC void
1427 xlog_recover_add_item(
1428         struct list_head        *head)
1429 {
1430         xlog_recover_item_t     *item;
1431
1432         item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1433         INIT_LIST_HEAD(&item->ri_list);
1434         list_add_tail(&item->ri_list, head);
1435 }
1436
1437 STATIC int
1438 xlog_recover_add_to_cont_trans(
1439         struct log              *log,
1440         xlog_recover_t          *trans,
1441         xfs_caddr_t             dp,
1442         int                     len)
1443 {
1444         xlog_recover_item_t     *item;
1445         xfs_caddr_t             ptr, old_ptr;
1446         int                     old_len;
1447
1448         if (list_empty(&trans->r_itemq)) {
1449                 /* finish copying rest of trans header */
1450                 xlog_recover_add_item(&trans->r_itemq);
1451                 ptr = (xfs_caddr_t) &trans->r_theader +
1452                                 sizeof(xfs_trans_header_t) - len;
1453                 memcpy(ptr, dp, len); /* dest, src, len */
1454                 return 0;
1455         }
1456         /* take the tail entry */
1457         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1458
1459         old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1460         old_len = item->ri_buf[item->ri_cnt-1].i_len;
1461
1462         ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1463         memcpy(&ptr[old_len], dp, len); /* dest, src, len */
1464         item->ri_buf[item->ri_cnt-1].i_len += len;
1465         item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1466         trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1467         return 0;
1468 }
1469
1470 /*
1471  * The next region to add is the start of a new region.  It could be
1472  * a whole region or it could be the first part of a new region.  Because
1473  * of this, the assumption here is that the type and size fields of all
1474  * format structures fit into the first 32 bits of the structure.
1475  *
1476  * This works because all regions must be 32 bit aligned.  Therefore, we
1477  * either have both fields or we have neither field.  In the case where we
1478  * have neither field, the data part of the region is zero length.  We only have
1479  * a log_op_header and can throw away the header since a new one will appear
1480  * later.  If we have at least 4 bytes, then we can determine how many regions
1481  * will appear in the current log item.
1482  */
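/*
 * A minimal sketch (hypothetical helper, not part of the recovery flow
 * itself) of the 32-bit peek described above: because regions are
 * 32 bit aligned, any region that has a payload starts with the common
 * type/size header, so four bytes are enough to read the region count.
 * As in xlog_recover_add_to_trans() below, any format structure will do.
 */
static inline uint
xlog_peek_region_count(
	xfs_caddr_t	dp,
	int		len)
{
	if (len < 4)
		return 0;	/* bare log_op_header, no payload to read */
	return ((xfs_inode_log_format_t *)dp)->ilf_size;
}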
1483 STATIC int
1484 xlog_recover_add_to_trans(
1485         struct log              *log,
1486         xlog_recover_t          *trans,
1487         xfs_caddr_t             dp,
1488         int                     len)
1489 {
1490         xfs_inode_log_format_t  *in_f;                  /* any will do */
1491         xlog_recover_item_t     *item;
1492         xfs_caddr_t             ptr;
1493
1494         if (!len)
1495                 return 0;
1496         if (list_empty(&trans->r_itemq)) {
1497                 /* we need to catch log corruptions here */
1498                 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1499                         xlog_warn("XFS: xlog_recover_add_to_trans: "
1500                                   "bad header magic number");
1501                         ASSERT(0);
1502                         return XFS_ERROR(EIO);
1503                 }
1504                 if (len == sizeof(xfs_trans_header_t))
1505                         xlog_recover_add_item(&trans->r_itemq);
1506                 memcpy(&trans->r_theader, dp, len); /* dest, src, len */
1507                 return 0;
1508         }
1509
1510         ptr = kmem_alloc(len, KM_SLEEP);
1511         memcpy(ptr, dp, len);
1512         in_f = (xfs_inode_log_format_t *)ptr;
1513
1514         /* take the tail entry */
1515         item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1516         if (item->ri_total != 0 && item->ri_total == item->ri_cnt) {
1518                 /* tail item is in use, get a new one */
1519                 xlog_recover_add_item(&trans->r_itemq);
1520                 item = list_entry(trans->r_itemq.prev,
1521                                         xlog_recover_item_t, ri_list);
1522         }
1523
1524         if (item->ri_total == 0) {              /* first region to be added */
1525                 if (in_f->ilf_size == 0 ||
1526                     in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1527                         xlog_warn(
1528         "XFS: bad number of regions (%d) in inode log format",
1529                                   in_f->ilf_size);
1530                         ASSERT(0);
1531                         return XFS_ERROR(EIO);
1532                 }
1533
1534                 item->ri_total = in_f->ilf_size;
1535                 item->ri_buf =
1536                         kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1537                                     KM_SLEEP);
1538         }
1539         ASSERT(item->ri_total > item->ri_cnt);
1540         /* Description region is ri_buf[0] */
1541         item->ri_buf[item->ri_cnt].i_addr = ptr;
1542         item->ri_buf[item->ri_cnt].i_len  = len;
1543         item->ri_cnt++;
1544         trace_xfs_log_recover_item_add(log, trans, item, 0);
1545         return 0;
1546 }
1547
1548 /*
1549  * Sort the log items in the transaction. Cancelled buffers need
1550  * to be put first so they are processed before any items that might
1551  * modify the buffers. If they are cancelled, then the modifications
1552  * don't need to be replayed.
1553  */
1554 STATIC int
1555 xlog_recover_reorder_trans(
1556         struct log              *log,
1557         xlog_recover_t          *trans,
1558         int                     pass)
1559 {
1560         xlog_recover_item_t     *item, *n;
1561         LIST_HEAD(sort_list);
1562
1563         list_splice_init(&trans->r_itemq, &sort_list);
1564         list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1565                 xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
1566
1567                 switch (ITEM_TYPE(item)) {
1568                 case XFS_LI_BUF:
1569                         if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1570                                 trace_xfs_log_recover_item_reorder_head(log,
1571                                                         trans, item, pass);
1572                                 list_move(&item->ri_list, &trans->r_itemq);
1573                                 break;
1574                         }
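			/* fall through: a cancelled buffer sorts to the tail */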
1575                 case XFS_LI_INODE:
1576                 case XFS_LI_DQUOT:
1577                 case XFS_LI_QUOTAOFF:
1578                 case XFS_LI_EFD:
1579                 case XFS_LI_EFI:
1580                         trace_xfs_log_recover_item_reorder_tail(log,
1581                                                         trans, item, pass);
1582                         list_move_tail(&item->ri_list, &trans->r_itemq);
1583                         break;
1584                 default:
1585                         xlog_warn(
1586         "XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
1587                         ASSERT(0);
1588                         return XFS_ERROR(EIO);
1589                 }
1590         }
1591         ASSERT(list_empty(&sort_list));
1592         return 0;
1593 }
1594
1595 /*
1596  * Build up the table of buf cancel records so that we don't replay
1597  * cancelled data in the second pass.  For buffer records that are
1598  * not cancel records, there is nothing to do here so we just return.
1599  *
1600  * If we get a cancel record which is already in the table, this indicates
1601  * that the buffer was cancelled multiple times.  In order to ensure
1602  * that during pass 2 we keep the record in the table until we reach its
1603  * last occurrence in the log, we keep a reference count in the cancel
1604  * record in the table to tell us how many times we expect to see this
1605  * record during the second pass.
1606  */
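/*
 * Illustrative sketch (hypothetical helper) of the hashing used by the
 * cancel table handling below: buckets are selected by disk block
 * number modulo the table size.
 */
static inline xfs_buf_cancel_t **
xlog_buf_cancel_bucket(
	xlog_t		*log,
	xfs_daddr_t	blkno)
{
	return &log->l_buf_cancel_table[(__uint64_t)blkno %
					XLOG_BC_TABLE_SIZE];
}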
1607 STATIC void
1608 xlog_recover_do_buffer_pass1(
1609         xlog_t                  *log,
1610         xfs_buf_log_format_t    *buf_f)
1611 {
1612         xfs_buf_cancel_t        *bcp;
1613         xfs_buf_cancel_t        *nextp;
1614         xfs_buf_cancel_t        *prevp;
1615         xfs_buf_cancel_t        **bucket;
1616         xfs_daddr_t             blkno = 0;
1617         uint                    len = 0;
1618         ushort                  flags = 0;
1619
1620         switch (buf_f->blf_type) {
1621         case XFS_LI_BUF:
1622                 blkno = buf_f->blf_blkno;
1623                 len = buf_f->blf_len;
1624                 flags = buf_f->blf_flags;
1625                 break;
1626         }
1627
1628         /*
1629          * If this isn't a cancel buffer item, then just return.
1630          */
1631         if (!(flags & XFS_BLF_CANCEL)) {
1632                 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1633                 return;
1634         }
1635
1636         /*
1637          * Insert an xfs_buf_cancel record into the hash table of
1638          * them.  If there is already an identical record, bump
1639          * its reference count.
1640          */
1641         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1642                                           XLOG_BC_TABLE_SIZE];
1643         /*
1644          * If the hash bucket is empty then just insert a new record into
1645          * the bucket.
1646          */
1647         if (*bucket == NULL) {
1648                 bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1649                                                      KM_SLEEP);
1650                 bcp->bc_blkno = blkno;
1651                 bcp->bc_len = len;
1652                 bcp->bc_refcount = 1;
1653                 bcp->bc_next = NULL;
1654                 *bucket = bcp;
1655                 return;
1656         }
1657
1658         /*
1659          * The hash bucket is not empty, so search for duplicates of our
1660          * record.  If we find one, then just bump its refcount.  If not,
1661          * then add a new record at the end of the list.
1662          */
1663         prevp = NULL;
1664         nextp = *bucket;
1665         while (nextp != NULL) {
1666                 if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
1667                         nextp->bc_refcount++;
1668                         trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1669                         return;
1670                 }
1671                 prevp = nextp;
1672                 nextp = nextp->bc_next;
1673         }
1674         ASSERT(prevp != NULL);
1675         bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
1676                                              KM_SLEEP);
1677         bcp->bc_blkno = blkno;
1678         bcp->bc_len = len;
1679         bcp->bc_refcount = 1;
1680         bcp->bc_next = NULL;
1681         prevp->bc_next = bcp;
1682         trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1683 }
1684
1685 /*
1686  * Check to see whether the buffer being recovered has a corresponding
1687  * entry in the buffer cancel record table.  If it does then return 1
1688  * so that it will be cancelled, otherwise return 0.  If the buffer is
1689  * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
1690  * the refcount on the entry in the table and remove it from the table
1691  * if this is the last reference.
1692  *
1693  * We remove the cancel record from the table when we encounter its
1694  * last occurrence in the log so that if the same buffer is re-used
1695  * again after its last cancellation we actually replay the changes
1696  * made at that point.
1697  */
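/*
 * Sketch (hypothetical helper) of the singly-linked bucket unlink that
 * xlog_check_buffer_cancelled() performs below when the last reference
 * to a cancel record is dropped.
 */
static inline void
xlog_buf_cancel_unlink(
	xfs_buf_cancel_t	**bucket,
	xfs_buf_cancel_t	*prevp,
	xfs_buf_cancel_t	*bcp)
{
	if (prevp == NULL)
		*bucket = bcp->bc_next;		/* bcp was the bucket head */
	else
		prevp->bc_next = bcp->bc_next;
	kmem_free(bcp);
}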
1698 STATIC int
1699 xlog_check_buffer_cancelled(
1700         xlog_t                  *log,
1701         xfs_daddr_t             blkno,
1702         uint                    len,
1703         ushort                  flags)
1704 {
1705         xfs_buf_cancel_t        *bcp;
1706         xfs_buf_cancel_t        *prevp;
1707         xfs_buf_cancel_t        **bucket;
1708
1709         if (log->l_buf_cancel_table == NULL) {
1710                 /*
1711                  * There is nothing in the table built in pass one,
1712                  * so this buffer must not be cancelled.
1713                  */
1714                 ASSERT(!(flags & XFS_BLF_CANCEL));
1715                 return 0;
1716         }
1717
1718         bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
1719                                           XLOG_BC_TABLE_SIZE];
1720         bcp = *bucket;
1721         if (bcp == NULL) {
1722                 /*
1723                  * There is no corresponding entry in the table built
1724                  * in pass one, so this buffer has not been cancelled.
1725                  */
1726                 ASSERT(!(flags & XFS_BLF_CANCEL));
1727                 return 0;
1728         }
1729
1730         /*
1731          * Search for an entry in the buffer cancel table that
1732          * matches our buffer.
1733          */
1734         prevp = NULL;
1735         while (bcp != NULL) {
1736                 if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
1737                         /*
1738                          * We've got a match, so return 1 so that the
1739                          * recovery of this buffer is cancelled.
1740                          * If this buffer is actually a buffer cancel
1741                          * log item, then decrement the refcount on the
1742                          * one in the table and remove it if this is the
1743                          * last reference.
1744                          */
1745                         if (flags & XFS_BLF_CANCEL) {
1746                                 bcp->bc_refcount--;
1747                                 if (bcp->bc_refcount == 0) {
1748                                         if (prevp == NULL) {
1749                                                 *bucket = bcp->bc_next;
1750                                         } else {
1751                                                 prevp->bc_next = bcp->bc_next;
1752                                         }
1753                                         kmem_free(bcp);
1754                                 }
1755                         }
1756                         return 1;
1757                 }
1758                 prevp = bcp;
1759                 bcp = bcp->bc_next;
1760         }
1761         /*
1762          * We didn't find a corresponding entry in the table, so
1763          * return 0 so that the buffer is NOT cancelled.
1764          */
1765         ASSERT(!(flags & XFS_BLF_CANCEL));
1766         return 0;
1767 }
1768
1769 STATIC int
1770 xlog_recover_do_buffer_pass2(
1771         xlog_t                  *log,
1772         xfs_buf_log_format_t    *buf_f)
1773 {
1774         xfs_daddr_t             blkno = 0;
1775         ushort                  flags = 0;
1776         uint                    len = 0;
1777
1778         switch (buf_f->blf_type) {
1779         case XFS_LI_BUF:
1780                 blkno = buf_f->blf_blkno;
1781                 flags = buf_f->blf_flags;
1782                 len = buf_f->blf_len;
1783                 break;
1784         }
1785
1786         return xlog_check_buffer_cancelled(log, blkno, len, flags);
1787 }
1788
1789 /*
1790  * Perform recovery for a buffer full of inodes.  In these buffers,
1791  * the only data which should be recovered is that which corresponds
1792  * to the di_next_unlinked pointers in the on disk inode structures.
1793  * The rest of the data for the inodes is always logged through the
1794  * inodes themselves rather than the inode buffer and is recovered
1795  * in xlog_recover_do_inode_trans().
1796  *
1797  * The only time when buffers full of inodes are fully recovered is
1798  * when the buffer is full of newly allocated inodes.  In this case
1799  * the buffer will not be marked as an inode buffer and so will be
1800  * sent to xlog_recover_do_reg_buffer() below during recovery.
1801  */
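/*
 * Sketch (hypothetical helper) of the offset calculation used in the
 * loop below: the di_next_unlinked field of the i-th inode in the
 * buffer lives at a fixed offset within that inode's slot.
 */
static inline int
xlog_next_unlinked_offset(
	xfs_mount_t	*mp,
	int		i)
{
	return (i * mp->m_sb.sb_inodesize) +
	       offsetof(xfs_dinode_t, di_next_unlinked);
}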
1802 STATIC int
1803 xlog_recover_do_inode_buffer(
1804         xfs_mount_t             *mp,
1805         xlog_recover_item_t     *item,
1806         xfs_buf_t               *bp,
1807         xfs_buf_log_format_t    *buf_f)
1808 {
1809         int                     i;
1810         int                     item_index;
1811         int                     bit;
1812         int                     nbits;
1813         int                     reg_buf_offset;
1814         int                     reg_buf_bytes;
1815         int                     next_unlinked_offset;
1816         int                     inodes_per_buf;
1817         xfs_agino_t             *logged_nextp;
1818         xfs_agino_t             *buffer_nextp;
1819         unsigned int            *data_map = NULL;
1820         unsigned int            map_size = 0;
1821
1822         trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1823
1824         switch (buf_f->blf_type) {
1825         case XFS_LI_BUF:
1826                 data_map = buf_f->blf_data_map;
1827                 map_size = buf_f->blf_map_size;
1828                 break;
1829         }
1830         /*
1831          * Set the variables corresponding to the current region to
1832          * 0 so that we'll initialize them on the first pass through
1833          * the loop.
1834          */
1835         reg_buf_offset = 0;
1836         reg_buf_bytes = 0;
1837         bit = 0;
1838         nbits = 0;
1839         item_index = 0;
1840         inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
1841         for (i = 0; i < inodes_per_buf; i++) {
1842                 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1843                         offsetof(xfs_dinode_t, di_next_unlinked);
1844
1845                 while (next_unlinked_offset >=
1846                        (reg_buf_offset + reg_buf_bytes)) {
1847                         /*
1848                          * The next di_next_unlinked field is beyond
1849                          * the current logged region.  Find the next
1850                          * logged region that contains or is beyond
1851                          * the current di_next_unlinked field.
1852                          */
1853                         bit += nbits;
1854                         bit = xfs_next_bit(data_map, map_size, bit);
1855
1856                         /*
1857                          * If there are no more logged regions in the
1858                          * buffer, then we're done.
1859                          */
1860                         if (bit == -1) {
1861                                 return 0;
1862                         }
1863
1864                         nbits = xfs_contig_bits(data_map, map_size,
1865                                                          bit);
1866                         ASSERT(nbits > 0);
1867                         reg_buf_offset = bit << XFS_BLF_SHIFT;
1868                         reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1869                         item_index++;
1870                 }
1871
1872                 /*
1873                  * If the current logged region starts after the current
1874                  * di_next_unlinked field, then move on to the next
1875                  * di_next_unlinked field.
1876                  */
1877                 if (next_unlinked_offset < reg_buf_offset) {
1878                         continue;
1879                 }
1880
1881                 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1882                 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1883                 ASSERT((reg_buf_offset + reg_buf_bytes) <= XFS_BUF_COUNT(bp));
1884
1885                 /*
1886                  * The current logged region contains a copy of the
1887                  * current di_next_unlinked field.  Extract its value
1888                  * and copy it to the buffer copy.
1889                  */
1890                 logged_nextp = item->ri_buf[item_index].i_addr +
1891                                 next_unlinked_offset - reg_buf_offset;
1892                 if (unlikely(*logged_nextp == 0)) {
1893                         xfs_fs_cmn_err(CE_ALERT, mp,
1894                                 "bad inode buffer log record (ptr = 0x%p, bp = 0x%p).  XFS trying to replay bad (0) inode di_next_unlinked field",
1895                                 item, bp);
1896                         XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1897                                          XFS_ERRLEVEL_LOW, mp);
1898                         return XFS_ERROR(EFSCORRUPTED);
1899                 }
1900
1901                 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1902                                               next_unlinked_offset);
1903                 *buffer_nextp = *logged_nextp;
1904         }
1905
1906         return 0;
1907 }
1908
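/*
 * Illustrative sketch (hypothetical helper) of the bitmap-to-bytes
 * mapping used by xlog_recover_do_reg_buffer() below: each bit in
 * blf_data_map covers one XFS_BLF_CHUNK sized chunk of the buffer, so
 * shifting a bit index by XFS_BLF_SHIFT yields the byte offset of the
 * corresponding logged region.
 */
static inline uint
xlog_region_byte_offset(
	int		bit)
{
	return (uint)bit << XFS_BLF_SHIFT;
}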
1909 /*
1910  * Perform a 'normal' buffer recovery.  Each logged region of the
1911  * buffer should be copied over the corresponding region in the
1912  * given buffer.  The bitmap in the buf log format structure indicates
1913  * where to place the logged data.
1914  */
1915 /*ARGSUSED*/
1916 STATIC void
1917 xlog_recover_do_reg_buffer(
1918         struct xfs_mount        *mp,
1919         xlog_recover_item_t     *item,
1920         xfs_buf_t               *bp,
1921         xfs_buf_log_format_t    *buf_f)
1922 {
1923         int                     i;
1924         int                     bit;
1925         int                     nbits;
1926         unsigned int            *data_map = NULL;
1927         unsigned int            map_size = 0;
1928         int                     error;
1929
1930         trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
1931
1932         switch (buf_f->blf_type) {
1933         case XFS_LI_BUF:
1934                 data_map = buf_f->blf_data_map;
1935                 map_size = buf_f->blf_map_size;
1936                 break;
1937         }
1938         bit = 0;
1939         i = 1;  /* 0 is the buf format structure */
1940         while (1) {
1941                 bit = xfs_next_bit(data_map, map_size, bit);
1942                 if (bit == -1)
1943                         break;
1944                 nbits = xfs_contig_bits(data_map, map_size, bit);
1945                 ASSERT(nbits > 0);
1946                 ASSERT(item->ri_buf[i].i_addr != NULL);
1947                 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
1948                 ASSERT(XFS_BUF_COUNT(bp) >=
1949                        ((uint)bit << XFS_BLF_SHIFT)+(nbits<<XFS_BLF_SHIFT));
1950
1951                 /*
1952                  * Do a sanity check if this is a dquot buffer. Just checking
1953          * the first dquot in the buffer should do.  XXX This is
1954                  * probably a good thing to do for other buf types also.
1955                  */
1956                 error = 0;
1957                 if (buf_f->blf_flags &
1958                    (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
1959                         if (item->ri_buf[i].i_addr == NULL) {
1960                                 cmn_err(CE_ALERT,
1961                                         "XFS: NULL dquot in %s.", __func__);
1962                                 goto next;
1963                         }
1964                         if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
1965                                 cmn_err(CE_ALERT,
1966                                         "XFS: dquot too small (%d) in %s.",
1967                                         item->ri_buf[i].i_len, __func__);
1968                                 goto next;
1969                         }
1970                         error = xfs_qm_dqcheck(item->ri_buf[i].i_addr,
1971                                                -1, 0, XFS_QMOPT_DOWARN,
1972                                                "dquot_buf_recover");
1973                         if (error)
1974                                 goto next;
1975                 }
1976
1977                 memcpy(xfs_buf_offset(bp,
1978                         (uint)bit << XFS_BLF_SHIFT),    /* dest */
1979                         item->ri_buf[i].i_addr,         /* source */
1980                         nbits<<XFS_BLF_SHIFT);          /* length */
1981  next:
1982                 i++;
1983                 bit += nbits;
1984         }
1985
1986         /* Shouldn't be any more regions */
1987         ASSERT(i == item->ri_total);
1988 }
1989
1990 /*
1991  * Do some primitive error checking on ondisk dquot data structures.
1992  */
1993 int
1994 xfs_qm_dqcheck(
1995         xfs_disk_dquot_t *ddq,
1996         xfs_dqid_t       id,
1997         uint             type,    /* used only when repairing (XFS_QMOPT_DQREPAIR) */
1998         uint             flags,
1999         char             *str)
2000 {
2001         xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
2002         int             errs = 0;
2003
2004         /*
2005          * We can encounter an uninitialized dquot buffer for 2 reasons:
2006          * 1. If we crash while deleting the quotainode(s), and those blks got
2007          *    used for user data. This is because we take the path of regular
2008          *    file deletion; however, the size field of quotainodes is never
2009          *    updated, so all the tricks that we play in itruncate_finish
2010          *    don't quite matter.
2011          *
2012          * 2. We don't replay the quota buffers when there's a quotaoff logitem.
2013          *    But the allocation will be replayed so we'll end up with an
2014          *    uninitialized quota block.
2015          *
2016          * This is all fine; things are still consistent, and we haven't lost
2017          * any quota information. Just don't complain about bad dquot blks.
2018          */
2019         if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
2020                 if (flags & XFS_QMOPT_DOWARN)
2021                         cmn_err(CE_ALERT,
2022                         "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2023                         str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2024                 errs++;
2025         }
2026         if (ddq->d_version != XFS_DQUOT_VERSION) {
2027                 if (flags & XFS_QMOPT_DOWARN)
2028                         cmn_err(CE_ALERT,
2029                         "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2030                         str, id, ddq->d_version, XFS_DQUOT_VERSION);
2031                 errs++;
2032         }
2033
2034         if (ddq->d_flags != XFS_DQ_USER &&
2035             ddq->d_flags != XFS_DQ_PROJ &&
2036             ddq->d_flags != XFS_DQ_GROUP) {
2037                 if (flags & XFS_QMOPT_DOWARN)
2038                         cmn_err(CE_ALERT,
2039                         "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2040                         str, id, ddq->d_flags);
2041                 errs++;
2042         }
2043
2044         if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2045                 if (flags & XFS_QMOPT_DOWARN)
2046                         cmn_err(CE_ALERT,
2047                         "%s : ondisk-dquot 0x%p, ID mismatch: "
2048                         "0x%x expected, found id 0x%x",
2049                         str, ddq, id, be32_to_cpu(ddq->d_id));
2050                 errs++;
2051         }
2052
2053         if (!errs && ddq->d_id) {
2054                 if (ddq->d_blk_softlimit &&
2055                     be64_to_cpu(ddq->d_bcount) >=
2056                                 be64_to_cpu(ddq->d_blk_softlimit)) {
2057                         if (!ddq->d_btimer) {
2058                                 if (flags & XFS_QMOPT_DOWARN)
2059                                         cmn_err(CE_ALERT,
2060                                         "%s : Dquot ID 0x%x (0x%p) "
2061                                         "BLK TIMER NOT STARTED",
2062                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2063                                 errs++;
2064                         }
2065                 }
2066                 if (ddq->d_ino_softlimit &&
2067                     be64_to_cpu(ddq->d_icount) >=
2068                                 be64_to_cpu(ddq->d_ino_softlimit)) {
2069                         if (!ddq->d_itimer) {
2070                                 if (flags & XFS_QMOPT_DOWARN)
2071                                         cmn_err(CE_ALERT,
2072                                         "%s : Dquot ID 0x%x (0x%p) "
2073                                         "INODE TIMER NOT STARTED",
2074                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2075                                 errs++;
2076                         }
2077                 }
2078                 if (ddq->d_rtb_softlimit &&
2079                     be64_to_cpu(ddq->d_rtbcount) >=
2080                                 be64_to_cpu(ddq->d_rtb_softlimit)) {
2081                         if (!ddq->d_rtbtimer) {
2082                                 if (flags & XFS_QMOPT_DOWARN)
2083                                         cmn_err(CE_ALERT,
2084                                         "%s : Dquot ID 0x%x (0x%p) "
2085                                         "RTBLK TIMER NOT STARTED",
2086                                         str, (int)be32_to_cpu(ddq->d_id), ddq);
2087                                 errs++;
2088                         }
2089                 }
2090         }
2091
2092         if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2093                 return errs;
2094
2095         if (flags & XFS_QMOPT_DOWARN)
2096                 cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);
2097
2098         /*
2099          * Typically, a repair is only requested by quotacheck.
2100          */
2101         ASSERT(id != -1);
2102         ASSERT(flags & XFS_QMOPT_DQREPAIR);
2103         memset(d, 0, sizeof(xfs_dqblk_t));
2104
2105         d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2106         d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2107         d->dd_diskdq.d_flags = type;
2108         d->dd_diskdq.d_id = cpu_to_be32(id);
2109
2110         return errs;
2111 }
2112
2113 /*
2114  * Perform a dquot buffer recovery.
2115  * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2116  * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2117  * Else, treat it as a regular buffer and do recovery.
2118  */
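/*
 * Sketch (hypothetical helper) of the quotaoff filter applied below:
 * build the dquot type mask from the buf log format flags and compare
 * it against the types noted as turned off during pass one.
 */
static inline int
xlog_dquot_buf_quotaoff(
	xlog_t			*log,
	xfs_buf_log_format_t	*buf_f)
{
	uint	type = 0;

	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQ_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQ_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQ_GROUP;
	return (log->l_quotaoffs_flag & type) != 0;
}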
2119 STATIC void
2120 xlog_recover_do_dquot_buffer(
2121         xfs_mount_t             *mp,
2122         xlog_t                  *log,
2123         xlog_recover_item_t     *item,
2124         xfs_buf_t               *bp,
2125         xfs_buf_log_format_t    *buf_f)
2126 {
2127         uint                    type;
2128
2129         trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2130
2131         /*
2132          * Filesystems are required to send in quota flags at mount time.
2133          */
2134         if (mp->m_qflags == 0) {
2135                 return;
2136         }
2137
2138         type = 0;
2139         if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2140                 type |= XFS_DQ_USER;
2141         if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2142                 type |= XFS_DQ_PROJ;
2143         if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2144                 type |= XFS_DQ_GROUP;
2145         /*
2146          * If this type of quota was turned off, ignore this buffer.
2147          */
2148         if (log->l_quotaoffs_flag & type)
2149                 return;
2150
2151         xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2152 }
2153
2154 /*
2155  * This routine replays a modification made to a buffer at runtime.
2156  * There are actually two types of buffer, regular and inode, which
2157  * are handled differently.  From inode buffers we only recover a
2158  * specific set of data, namely the inode di_next_unlinked
2159  * fields.  This is because all other inode
2160  * data is actually logged via inode records and any data we replay
2161  * here which overlaps that may be stale.
2162  *
2163  * When meta-data buffers are freed at run time we log a buffer item
2164  * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2165  * of the buffer in the log should not be replayed at recovery time.
2166  * This is so that if the blocks covered by the buffer are reused for
2167  * file data before we crash we don't end up replaying old, freed
2168  * meta-data into a user's file.
2169  *
2170  * To handle the cancellation of buffer log items, we make two passes
2171  * over the log during recovery.  During the first we build a table of
2172  * those buffers which have been cancelled, and during the second we
2173  * only replay those buffers which do not have corresponding cancel
2174  * records in the table.  See xlog_recover_do_buffer_pass[1,2] above
2175  * for more details on the implementation of the table of cancel records.
2176  */
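/*
 * Minimal sketch (hypothetical helper mirroring the logic below) of the
 * two-pass dispatch: pass one only populates the cancel table, pass two
 * consults it to decide whether replay should proceed.
 */
static inline int
xlog_buf_item_cancelled(
	xlog_t			*log,
	xfs_buf_log_format_t	*buf_f,
	int			pass)
{
	if (pass == XLOG_RECOVER_PASS1) {
		xlog_recover_do_buffer_pass1(log, buf_f);
		return 0;	/* nothing is ever cancelled in pass one */
	}
	return xlog_recover_do_buffer_pass2(log, buf_f);
}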
2177 STATIC int
2178 xlog_recover_do_buffer_trans(
2179         xlog_t                  *log,
2180         xlog_recover_item_t     *item,
2181         int                     pass)
2182 {
2183         xfs_buf_log_format_t    *buf_f = item->ri_buf[0].i_addr;
2184         xfs_mount_t             *mp;
2185         xfs_buf_t               *bp;
2186         int                     error;
2187         int                     cancel;
2188         xfs_daddr_t             blkno;
2189         int                     len;
2190         ushort                  flags;
2191         uint                    buf_flags;
2192
2193         if (pass == XLOG_RECOVER_PASS1) {
2194                 /*
2195                  * In this pass we're only looking for buf items
2196                  * with the XFS_BLF_CANCEL bit set.
2197                  */
2198                 xlog_recover_do_buffer_pass1(log, buf_f);
2199                 return 0;
2200         } else {
2201                 /*
2202                  * In this pass we want to recover all the buffers
2203                  * which have not been cancelled and are not
2204                  * cancellation buffers themselves.  The routine
2205                  * we call here will tell us whether or not to
2206                  * continue with the replay of this buffer.
2207                  */
2208                 cancel = xlog_recover_do_buffer_pass2(log, buf_f);
2209                 if (cancel) {
2210                         trace_xfs_log_recover_buf_cancel(log, buf_f);
2211                         return 0;
2212                 }
2213         }
2214         trace_xfs_log_recover_buf_recover(log, buf_f);
2215         switch (buf_f->blf_type) {
2216         case XFS_LI_BUF:
2217                 blkno = buf_f->blf_blkno;
2218                 len = buf_f->blf_len;
2219                 flags = buf_f->blf_flags;
2220                 break;
2221         default:
2222                 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
2223                         "xfs_log_recover: unknown buffer type 0x%x, logdev %s",
2224                         buf_f->blf_type, log->l_mp->m_logname ?
2225                         log->l_mp->m_logname : "internal");
2226                 XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
2227                                  XFS_ERRLEVEL_LOW, log->l_mp);
2228                 return XFS_ERROR(EFSCORRUPTED);
2229         }
2230
2231         mp = log->l_mp;
2232         buf_flags = XBF_LOCK;
2233         if (!(flags & XFS_BLF_INODE_BUF))
2234                 buf_flags |= XBF_MAPPED;
2235
2236         bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
2237         if (XFS_BUF_ISERROR(bp)) {
2238                 xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
2239                                   bp, blkno);
2240                 error = XFS_BUF_GETERROR(bp);
2241                 xfs_buf_relse(bp);
2242                 return error;
2243         }
2244
2245         error = 0;
2246         if (flags & XFS_BLF_INODE_BUF) {
2247                 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2248         } else if (flags &
2249                   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2250                 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2251         } else {
2252                 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2253         }
2254         if (error)
2255                 return XFS_ERROR(error);
2256
2257         /*
2258          * Perform delayed write on the buffer.  Issuing individual
2259          * asynchronous writes would be slower given how many buffers must be flushed.
2260          *
2261          * Also make sure that only inode buffers with good sizes stay in
2262          * the buffer cache.  The kernel moves inodes in buffers of 1 block
2263          * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger.  The inode
2264          * buffers in the log can be a different size if the log was generated
2265          * by an older kernel using unclustered inode buffers or a newer kernel
2266          * running with a different inode cluster size.  Regardless, if
2267          * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2268          * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2269          * the buffer out of the buffer cache so that the buffer won't
2270          * overlap with future reads of those inodes.
2271          */
2272         if (XFS_DINODE_MAGIC ==
2273             be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2274             (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize,
2275                         (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2276                 XFS_BUF_STALE(bp);
2277                 error = xfs_bwrite(mp, bp);
2278         } else {
2279                 ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2280                 bp->b_mount = mp;
2281                 XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2282                 xfs_bdwrite(mp, bp);
2283         }
2284
2285         return (error);
2286 }
2287
2288 STATIC int
2289 xlog_recover_do_inode_trans(
2290         xlog_t                  *log,
2291         xlog_recover_item_t     *item,
2292         int                     pass)
2293 {
2294         xfs_inode_log_format_t  *in_f;
2295         xfs_mount_t             *mp;
2296         xfs_buf_t               *bp;
2297         xfs_dinode_t            *dip;
2298         xfs_ino_t               ino;
2299         int                     len;
2300         xfs_caddr_t             src;
2301         xfs_caddr_t             dest;
2302         int                     error;
2303         int                     attr_index;
2304         uint                    fields;
2305         xfs_icdinode_t          *dicp;
2306         int                     need_free = 0;
2307
2308         if (pass == XLOG_RECOVER_PASS1) {
2309                 return 0;
2310         }
2311
2312         if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2313                 in_f = item->ri_buf[0].i_addr;
2314         } else {
2315                 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2316                 need_free = 1;
2317                 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2318                 if (error)
2319                         goto error;
2320         }
2321         ino = in_f->ilf_ino;
2322         mp = log->l_mp;
2323
2324         /*
2325          * Inode buffers can be freed, look out for it,
2326          * and do not replay the inode.
2327          */
2328         if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2329                                         in_f->ilf_len, 0)) {
2330                 error = 0;
2331                 trace_xfs_log_recover_inode_cancel(log, in_f);
2332                 goto error;
2333         }
2334         trace_xfs_log_recover_inode_recover(log, in_f);
2335
2336         bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
2337                           XBF_LOCK);
2338         if (XFS_BUF_ISERROR(bp)) {
2339                 xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
2340                                   bp, in_f->ilf_blkno);
2341                 error = XFS_BUF_GETERROR(bp);
2342                 xfs_buf_relse(bp);
2343                 goto error;
2344         }
2345         error = 0;
2346         ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2347         dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2348
2349         /*
2350          * Make sure the place we're flushing out to really looks
2351          * like an inode!
2352          */
2353         if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
2354                 xfs_buf_relse(bp);
2355                 xfs_fs_cmn_err(CE_ALERT, mp,
2356                         "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
2357                         dip, bp, ino);
2358                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
2359                                  XFS_ERRLEVEL_LOW, mp);
2360                 error = EFSCORRUPTED;
2361                 goto error;
2362         }
2363         dicp = item->ri_buf[1].i_addr;
2364         if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2365                 xfs_buf_relse(bp);
2366                 xfs_fs_cmn_err(CE_ALERT, mp,
2367                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
2368                         item, ino);
2369                 XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
2370                                  XFS_ERRLEVEL_LOW, mp);
2371                 error = EFSCORRUPTED;
2372                 goto error;
2373         }
2374
2375         /* Skip replay when the on disk inode is newer than the log one */
2376         if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2377                 /*
2378                  * Deal with the counter wrap case: a log value that has
2379                  * wrapped to a small number is newer than DI_MAX_FLUSH on disk.
2380                  */
2381                 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2382                     dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2383                         /* do nothing */
2384                 } else {
2385                         xfs_buf_relse(bp);
2386                         trace_xfs_log_recover_inode_skip(log, in_f);
2387                         error = 0;
2388                         goto error;
2389                 }
2390         }
2391         /* Take the opportunity to reset the flush iteration count */
2392         dicp->di_flushiter = 0;
2393
2394         if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
2395                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2396                     (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2397                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
2398                                          XFS_ERRLEVEL_LOW, mp, dicp);
2399                         xfs_buf_relse(bp);
2400                         xfs_fs_cmn_err(CE_ALERT, mp,
2401                                 "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2402                                 item, dip, bp, ino);
2403                         error = EFSCORRUPTED;
2404                         goto error;
2405                 }
2406         } else if (unlikely((dicp->di_mode & S_IFMT) == S_IFDIR)) {
2407                 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2408                     (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2409                     (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2410                         XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
2411                                              XFS_ERRLEVEL_LOW, mp, dicp);
2412                         xfs_buf_relse(bp);
2413                         xfs_fs_cmn_err(CE_ALERT, mp,
2414                                 "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2415                                 item, dip, bp, ino);
2416                         error = EFSCORRUPTED;
2417                         goto error;
2418                 }
2419         }
2420         if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)) {
2421                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
2422                                      XFS_ERRLEVEL_LOW, mp, dicp);
2423                 xfs_buf_relse(bp);
2424                 xfs_fs_cmn_err(CE_ALERT, mp,
2425                         "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2426                         item, dip, bp, ino,
2427                         dicp->di_nextents + dicp->di_anextents,
2428                         dicp->di_nblocks);
2429                 error = EFSCORRUPTED;
2430                 goto error;
2431         }
2432         if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2433                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
2434                                      XFS_ERRLEVEL_LOW, mp, dicp);
2435                 xfs_buf_relse(bp);
2436                 xfs_fs_cmn_err(CE_ALERT, mp,
2437                         "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
2438                         item, dip, bp, ino, dicp->di_forkoff);
2439                 error = EFSCORRUPTED;
2440                 goto error;
2441         }
2442         if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
2443                 XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
2444                                      XFS_ERRLEVEL_LOW, mp, dicp);
2445                 xfs_buf_relse(bp);
2446                 xfs_fs_cmn_err(CE_ALERT, mp,
2447                         "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p",
2448                         item->ri_buf[1].i_len, item);
2449                 error = EFSCORRUPTED;
2450                 goto error;
2451         }
2452
2453         /* The logged inode core is in in-core format; convert it to disk format */
2454         xfs_dinode_to_disk(dip, item->ri_buf[1].i_addr);
2455
2456         /* the rest is in on-disk format */
2457         if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) {
2458                 memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode),
2459                         item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode),
2460                         item->ri_buf[1].i_len  - sizeof(struct xfs_icdinode));
2461         }
2462
2463         fields = in_f->ilf_fields;
2464         switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2465         case XFS_ILOG_DEV:
2466                 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2467                 break;
2468         case XFS_ILOG_UUID:
2469                 memcpy(XFS_DFORK_DPTR(dip),
2470                        &in_f->ilf_u.ilfu_uuid,
2471                        sizeof(uuid_t));
2472                 break;
2473         }
2474
2475         if (in_f->ilf_size == 2)
2476                 goto write_inode_buffer;
2477         len = item->ri_buf[2].i_len;
2478         src = item->ri_buf[2].i_addr;
2479         ASSERT(in_f->ilf_size <= 4);
2480         ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2481         ASSERT(!(fields & XFS_ILOG_DFORK) ||
2482                (len == in_f->ilf_dsize));
2483
2484         switch (fields & XFS_ILOG_DFORK) {
2485         case XFS_ILOG_DDATA:
2486         case XFS_ILOG_DEXT:
2487                 memcpy(XFS_DFORK_DPTR(dip), src, len);
2488                 break;
2489
2490         case XFS_ILOG_DBROOT:
2491                 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2492                                  (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2493                                  XFS_DFORK_DSIZE(dip, mp));
2494                 break;
2495
2496         default:
2497                 /*
2498                  * There are no data fork flags set.
2499                  */
2500                 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2501                 break;
2502         }
2503
2504         /*
2505          * If we logged any attribute data, recover it.  There may or
2506          * may not have been any other non-core data logged in this
2507          * transaction.
2508          */
2509         if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2510                 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2511                         attr_index = 3;
2512                 } else {
2513                         attr_index = 2;
2514                 }
2515                 len = item->ri_buf[attr_index].i_len;
2516                 src = item->ri_buf[attr_index].i_addr;
2517                 ASSERT(len == in_f->ilf_asize);
2518
2519                 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2520                 case XFS_ILOG_ADATA:
2521                 case XFS_ILOG_AEXT:
2522                         dest = XFS_DFORK_APTR(dip);
2523                         ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2524                         memcpy(dest, src, len);
2525                         break;
2526
2527                 case XFS_ILOG_ABROOT:
2528                         dest = XFS_DFORK_APTR(dip);
2529                         xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2530                                          len, (xfs_bmdr_block_t*)dest,
2531                                          XFS_DFORK_ASIZE(dip, mp));
2532                         break;
2533
2534                 default:
2535                         xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
2536                         ASSERT(0);
2537                         xfs_buf_relse(bp);
2538                         error = EIO;
2539                         goto error;
2540                 }
2541         }
2542
2543 write_inode_buffer:
2544         ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2545         bp->b_mount = mp;
2546         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2547         xfs_bdwrite(mp, bp);
2548 error:
2549         if (need_free)
2550                 kmem_free(in_f);
2551         return XFS_ERROR(error);
2552 }
2553
2554 /*
2555  * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
2556  * structure, so that we know not to do any dquot item or dquot buffer
2557  * recovery of that type.
2558  */
2559 STATIC int
2560 xlog_recover_do_quotaoff_trans(
2561         xlog_t                  *log,
2562         xlog_recover_item_t     *item,
2563         int                     pass)
2564 {
2565         xfs_qoff_logformat_t    *qoff_f;
2566
2567         if (pass == XLOG_RECOVER_PASS2) {
2568                 return (0);
2569         }
2570
2571         qoff_f = item->ri_buf[0].i_addr;
2572         ASSERT(qoff_f);
2573
2574         /*
2575          * The logitem format's flag tells us if this was user quotaoff,
2576          * group/project quotaoff or both.
2577          */
2578         if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2579                 log->l_quotaoffs_flag |= XFS_DQ_USER;
2580         if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2581                 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2582         if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2583                 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
2584
2585         return (0);
2586 }
2587
2588 /*
2589  * Recover a dquot record
2590  */
2591 STATIC int
2592 xlog_recover_do_dquot_trans(
2593         xlog_t                  *log,
2594         xlog_recover_item_t     *item,
2595         int                     pass)
2596 {
2597         xfs_mount_t             *mp;
2598         xfs_buf_t               *bp;
2599         struct xfs_disk_dquot   *ddq, *recddq;
2600         int                     error;
2601         xfs_dq_logformat_t      *dq_f;
2602         uint                    type;
2603
2604         if (pass == XLOG_RECOVER_PASS1) {
2605                 return 0;
2606         }
2607         mp = log->l_mp;
2608
2609         /*
2610          * Filesystems are required to send in quota flags at mount time.
2611          */
2612         if (mp->m_qflags == 0)
2613                 return (0);
2614
2615         recddq = item->ri_buf[1].i_addr;
2616         if (recddq == NULL) {
2617                 cmn_err(CE_ALERT,
2618                         "XFS: NULL dquot in %s.", __func__);
2619                 return XFS_ERROR(EIO);
2620         }
2621         if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2622                 cmn_err(CE_ALERT,
2623                         "XFS: dquot too small (%d) in %s.",
2624                         item->ri_buf[1].i_len, __func__);
2625                 return XFS_ERROR(EIO);
2626         }
2627
2628         /*
2629          * This type of quotas was turned off, so ignore this record.
2630          */
2631         type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2632         ASSERT(type);
2633         if (log->l_quotaoffs_flag & type)
2634                 return (0);
2635
2636         /*
2637          * At this point we know that quota was _not_ turned off.
2638          * Since the mount flags are not indicating to us otherwise, this
2639          * must mean that quota is on, and the dquot needs to be replayed.
2640          * Remember that we may not have fully recovered the superblock yet,
2641          * so we can't do the usual trick of looking at the SB quota bits.
2642          *
2643          * The other possibility, of course, is that the quota subsystem was
2644          * removed since the last mount - ENOSYS.
2645          */
2646         dq_f = item->ri_buf[0].i_addr;
2647         ASSERT(dq_f);
2648         if ((error = xfs_qm_dqcheck(recddq,
2649                            dq_f->qlf_id,
2650                            0, XFS_QMOPT_DOWARN,
2651                            "xlog_recover_do_dquot_trans (log copy)"))) {
2652                 return XFS_ERROR(EIO);
2653         }
2654         ASSERT(dq_f->qlf_len == 1);
2655
2656         error = xfs_read_buf(mp, mp->m_ddev_targp,
2657                              dq_f->qlf_blkno,
2658                              XFS_FSB_TO_BB(mp, dq_f->qlf_len),
2659                              0, &bp);
2660         if (error) {
2661                 xfs_ioerror_alert("xlog_recover_do..(read#3)", mp,
2662                                   bp, dq_f->qlf_blkno);
2663                 return error;
2664         }
2665         ASSERT(bp);
2666         ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2667
2668         /*
2669          * At least the magic num portion should be on disk because this
2670          * was among a chunk of dquots created earlier, and we did some
2671          * minimal initialization then.
2672          */
2673         if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2674                            "xlog_recover_do_dquot_trans")) {
2675                 xfs_buf_relse(bp);
2676                 return XFS_ERROR(EIO);
2677         }
2678
2679         memcpy(ddq, recddq, item->ri_buf[1].i_len);
2680
2681         ASSERT(dq_f->qlf_size == 2);
2682         ASSERT(bp->b_mount == NULL || bp->b_mount == mp);
2683         bp->b_mount = mp;
2684         XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
2685         xfs_bdwrite(mp, bp);
2686
2687         return (0);
2688 }
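/*
 * Region layout assumed by the dquot replay above (a sketch inferred
 * from the accesses in xlog_recover_do_dquot_trans(), not an
 * authoritative format description):
 *
 *	item->ri_buf[0]: xfs_dq_logformat_t    (qlf_id, qlf_blkno,
 *	                                        qlf_len, qlf_boffset)
 *	item->ri_buf[1]: struct xfs_disk_dquot (the image memcpy'd over
 *	                                        the on-disk dquot)
 */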
2689
2690 /*
2691  * This routine is called to create an in-core extent free intent
2692  * item from the efi format structure which was logged on disk.
2693  * It allocates an in-core efi, copies the extents from the format
2694  * structure into it, and adds the efi to the AIL with the given
2695  * LSN.
2696  */
2697 STATIC int
2698 xlog_recover_do_efi_trans(
2699         xlog_t                  *log,
2700         xlog_recover_item_t     *item,
2701         xfs_lsn_t               lsn,
2702         int                     pass)
2703 {
2704         int                     error;
2705         xfs_mount_t             *mp;
2706         xfs_efi_log_item_t      *efip;
2707         xfs_efi_log_format_t    *efi_formatp;
2708
2709         if (pass == XLOG_RECOVER_PASS1) {
2710                 return 0;
2711         }
2712
2713         efi_formatp = item->ri_buf[0].i_addr;
2714
2715         mp = log->l_mp;
2716         efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2717         if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2718                                          &(efip->efi_format)))) {
2719                 xfs_efi_item_free(efip);
2720                 return error;
2721         }
2722         efip->efi_next_extent = efi_formatp->efi_nextents;
2723         efip->efi_flags |= XFS_EFI_COMMITTED;
2724
2725         spin_lock(&log->l_ailp->xa_lock);
2726         /*
2727          * xfs_trans_ail_update() drops the AIL lock.
2728          */
2729         xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
2730         return 0;
2731 }
2732
2733
2734 /*
2735  * This routine is called when an efd format structure is found in
2736  * a committed transaction in the log.  Its purpose is to cancel
2737  * the corresponding efi if it was still in the log.  To do this
2738  * it searches the AIL for the efi with an id equal to that in the
2739  * efd format structure.  If we find it, we remove the efi from the
2740  * AIL and free it.
2741  */
2742 STATIC void
2743 xlog_recover_do_efd_trans(
2744         xlog_t                  *log,
2745         xlog_recover_item_t     *item,
2746         int                     pass)
2747 {
2748         xfs_efd_log_format_t    *efd_formatp;
2749         xfs_efi_log_item_t      *efip = NULL;
2750         xfs_log_item_t          *lip;
2751         __uint64_t              efi_id;
2752         struct xfs_ail_cursor   cur;
2753         struct xfs_ail          *ailp = log->l_ailp;
2754
2755         if (pass == XLOG_RECOVER_PASS1) {
2756                 return;
2757         }
2758
2759         efd_formatp = item->ri_buf[0].i_addr;
2760         ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2761                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2762                (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2763                 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2764         efi_id = efd_formatp->efd_efi_id;
2765
2766         /*
2767          * Search for the efi with the id in the efd format structure
2768          * in the AIL.
2769          */
2770         spin_lock(&ailp->xa_lock);
2771         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2772         while (lip != NULL) {
2773                 if (lip->li_type == XFS_LI_EFI) {
2774                         efip = (xfs_efi_log_item_t *)lip;
2775                         if (efip->efi_format.efi_id == efi_id) {
2776                                 /*
2777                                  * xfs_trans_ail_delete() drops the
2778                                  * AIL lock.
2779                                  */
2780                                 xfs_trans_ail_delete(ailp, lip);
2781                                 xfs_efi_item_free(efip);
2782                                 spin_lock(&ailp->xa_lock);
2783                                 break;
2784                         }
2785                 }
2786                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2787         }
2788         xfs_trans_ail_cursor_done(ailp, &cur);
2789         spin_unlock(&ailp->xa_lock);
2790 }
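/*
 * A minimal sketch of the AIL cursor walk used above, assuming only
 * the cursor calls already visible in this file; "matches" is a
 * hypothetical predicate standing in for the efi_id comparison.  Note
 * that any call which drops xa_lock must retake it before the walk
 * continues:
 */
#if 0	/* illustration only */
STATIC xfs_log_item_t *
xlog_ail_find(
	struct xfs_ail		*ailp,
	int			(*matches)(xfs_log_item_t *))
{
	struct xfs_ail_cursor	cur;
	xfs_log_item_t		*lip;

	spin_lock(&ailp->xa_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	     lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		if (matches(lip))
			break;		/* found the item of interest */
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&ailp->xa_lock);
	return lip;
}
#endif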
2791
2792 /*
2793  * Perform the transaction
2794  *
2795  * If the transaction modifies a buffer or inode, replay it now.  EFIs
2796  * and EFDs get queued up by adding entries into the AIL for them.
2797  */
2798 STATIC int
2799 xlog_recover_do_trans(
2800         xlog_t                  *log,
2801         xlog_recover_t          *trans,
2802         int                     pass)
2803 {
2804         int                     error = 0;
2805         xlog_recover_item_t     *item;
2806
2807         error = xlog_recover_reorder_trans(log, trans, pass);
2808         if (error)
2809                 return error;
2810
2811         list_for_each_entry(item, &trans->r_itemq, ri_list) {
2812                 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2813                 switch (ITEM_TYPE(item)) {
2814                 case XFS_LI_BUF:
2815                         error = xlog_recover_do_buffer_trans(log, item, pass);
2816                         break;
2817                 case XFS_LI_INODE:
2818                         error = xlog_recover_do_inode_trans(log, item, pass);
2819                         break;
2820                 case XFS_LI_EFI:
2821                         error = xlog_recover_do_efi_trans(log, item,
2822                                                           trans->r_lsn, pass);
2823                         break;
2824                 case XFS_LI_EFD:
2825                         xlog_recover_do_efd_trans(log, item, pass);
2826                         error = 0;
2827                         break;
2828                 case XFS_LI_DQUOT:
2829                         error = xlog_recover_do_dquot_trans(log, item, pass);
2830                         break;
2831                 case XFS_LI_QUOTAOFF:
2832                         error = xlog_recover_do_quotaoff_trans(log, item,
2833                                                                pass);
2834                         break;
2835                 default:
2836                         xlog_warn(
2837         "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
2838                         ASSERT(0);
2839                         error = XFS_ERROR(EIO);
2840                         break;
2841                 }
2842
2843                 if (error)
2844                         return error;
2845         }
2846
2847         return 0;
2848 }
2849
2850 /*
2851  * Free up any resources allocated by the transaction
2852  *
2853  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2854  */
2855 STATIC void
2856 xlog_recover_free_trans(
2857         xlog_recover_t          *trans)
2858 {
2859         xlog_recover_item_t     *item, *n;
2860         int                     i;
2861
2862         list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2863                 /* Free the regions in the item. */
2864                 list_del(&item->ri_list);
2865                 for (i = 0; i < item->ri_cnt; i++)
2866                         kmem_free(item->ri_buf[i].i_addr);
2867                 /* Free the item itself */
2868                 kmem_free(item->ri_buf);
2869                 kmem_free(item);
2870         }
2871         /* Free the transaction recover structure */
2872         kmem_free(trans);
2873 }
2874
2875 STATIC int
2876 xlog_recover_commit_trans(
2877         xlog_t                  *log,
2878         xlog_recover_t          *trans,
2879         int                     pass)
2880 {
2881         int                     error;
2882
2883         hlist_del(&trans->r_list);
2884         if ((error = xlog_recover_do_trans(log, trans, pass)))
2885                 return error;
2886         xlog_recover_free_trans(trans);                 /* no error */
2887         return 0;
2888 }
2889
2890 STATIC int
2891 xlog_recover_unmount_trans(
2892         xlog_recover_t          *trans)
2893 {
2894         /* Do nothing now */
2895         xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR");
2896         return 0;
2897 }
2898
2899 /*
2900  * There are two valid states of the r_state field.  0 indicates that the
2901  * transaction structure is in a normal state: we have either seen the
2902  * start of the transaction, or the last operation we added was not a
2903  * partial operation.  If the last operation we added to the transaction
2904  * was a partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2905  *
2906  * NOTE: skip LRs with 0 data length.
2907  */
2908 STATIC int
2909 xlog_recover_process_data(
2910         xlog_t                  *log,
2911         struct hlist_head       rhash[],
2912         xlog_rec_header_t       *rhead,
2913         xfs_caddr_t             dp,
2914         int                     pass)
2915 {
2916         xfs_caddr_t             lp;
2917         int                     num_logops;
2918         xlog_op_header_t        *ohead;
2919         xlog_recover_t          *trans;
2920         xlog_tid_t              tid;
2921         int                     error;
2922         unsigned long           hash;
2923         uint                    flags;
2924
2925         lp = dp + be32_to_cpu(rhead->h_len);
2926         num_logops = be32_to_cpu(rhead->h_num_logops);
2927
2928         /* check the log format matches our own - else we can't recover */
2929         if (xlog_header_check_recover(log->l_mp, rhead))
2930                 return (XFS_ERROR(EIO));
2931
2932         while ((dp < lp) && num_logops) {
2933                 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
2934                 ohead = (xlog_op_header_t *)dp;
2935                 dp += sizeof(xlog_op_header_t);
2936                 if (ohead->oh_clientid != XFS_TRANSACTION &&
2937                     ohead->oh_clientid != XFS_LOG) {
2938                         xlog_warn(
2939                 "XFS: xlog_recover_process_data: bad clientid");
2940                         ASSERT(0);
2941                         return (XFS_ERROR(EIO));
2942                 }
2943                 tid = be32_to_cpu(ohead->oh_tid);
2944                 hash = XLOG_RHASH(tid);
2945                 trans = xlog_recover_find_tid(&rhash[hash], tid);
2946                 if (trans == NULL) {               /* not found; add new tid */
2947                         if (ohead->oh_flags & XLOG_START_TRANS)
2948                                 xlog_recover_new_tid(&rhash[hash], tid,
2949                                         be64_to_cpu(rhead->h_lsn));
2950                 } else {
2951                         if (dp + be32_to_cpu(ohead->oh_len) > lp) {
2952                                 xlog_warn(
2953                         "XFS: xlog_recover_process_data: bad length");
2954                                 WARN_ON(1);
2955                                 return (XFS_ERROR(EIO));
2956                         }
2957                         flags = ohead->oh_flags & ~XLOG_END_TRANS;
2958                         if (flags & XLOG_WAS_CONT_TRANS)
2959                                 flags &= ~XLOG_CONTINUE_TRANS;
2960                         switch (flags) {
2961                         case XLOG_COMMIT_TRANS:
2962                                 error = xlog_recover_commit_trans(log,
2963                                                                 trans, pass);
2964                                 break;
2965                         case XLOG_UNMOUNT_TRANS:
2966                                 error = xlog_recover_unmount_trans(trans);
2967                                 break;
2968                         case XLOG_WAS_CONT_TRANS:
2969                                 error = xlog_recover_add_to_cont_trans(log,
2970                                                 trans, dp,
2971                                                 be32_to_cpu(ohead->oh_len));
2972                                 break;
2973                         case XLOG_START_TRANS:
2974                                 xlog_warn(
2975                         "XFS: xlog_recover_process_data: bad transaction");
2976                                 ASSERT(0);
2977                                 error = XFS_ERROR(EIO);
2978                                 break;
2979                         case 0:
2980                         case XLOG_CONTINUE_TRANS:
2981                                 error = xlog_recover_add_to_trans(log, trans,
2982                                                 dp, be32_to_cpu(ohead->oh_len));
2983                                 break;
2984                         default:
2985                                 xlog_warn(
2986                         "XFS: xlog_recover_process_data: bad flag");
2987                                 ASSERT(0);
2988                                 error = XFS_ERROR(EIO);
2989                                 break;
2990                         }
2991                         if (error)
2992                                 return error;
2993                 }
2994                 dp += be32_to_cpu(ohead->oh_len);
2995                 num_logops--;
2996         }
2997         return 0;
2998 }
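/*
 * Sketch of the record layout walked above (inferred from the loop,
 * not a formal on-disk specification).  The record body is a packed
 * sequence of operation headers and payloads:
 *
 *	dp                                              lp = dp + h_len
 *	| xlog_op_header_t | oh_len payload | xlog_op_header_t | ... |
 *
 * Each payload is attached to the transaction found via oh_tid, and
 * oh_flags decides whether it starts, continues, commits or unmounts
 * that transaction.
 */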
2999
3000 /*
3001  * Process an extent free intent item that was recovered from
3002  * the log.  We need to free the extents that it describes.
3003  */
3004 STATIC int
3005 xlog_recover_process_efi(
3006         xfs_mount_t             *mp,
3007         xfs_efi_log_item_t      *efip)
3008 {
3009         xfs_efd_log_item_t      *efdp;
3010         xfs_trans_t             *tp;
3011         int                     i;
3012         int                     error = 0;
3013         xfs_extent_t            *extp;
3014         xfs_fsblock_t           startblock_fsb;
3015
3016         ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
3017
3018         /*
3019          * First check the validity of the extents described by the
3020          * EFI.  If any are bad, then assume that all are bad and
3021          * just toss the EFI.
3022          */
3023         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3024                 extp = &(efip->efi_format.efi_extents[i]);
3025                 startblock_fsb = XFS_BB_TO_FSB(mp,
3026                                    XFS_FSB_TO_DADDR(mp, extp->ext_start));
3027                 if ((startblock_fsb == 0) ||
3028                     (extp->ext_len == 0) ||
3029                     (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3030                     (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3031                         /*
3032                          * This will pull the EFI from the AIL and
3033                          * free the memory associated with it.
3034                          */
3035                         xfs_efi_release(efip, efip->efi_format.efi_nextents);
3036                         return XFS_ERROR(EIO);
3037                 }
3038         }
3039
3040         tp = xfs_trans_alloc(mp, 0);
3041         error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3042         if (error)
3043                 goto abort_error;
3044         efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3045
3046         for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3047                 extp = &(efip->efi_format.efi_extents[i]);
3048                 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3049                 if (error)
3050                         goto abort_error;
3051                 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3052                                          extp->ext_len);
3053         }
3054
3055         efip->efi_flags |= XFS_EFI_RECOVERED;
3056         error = xfs_trans_commit(tp, 0);
3057         return error;
3058
3059 abort_error:
3060         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3061         return error;
3062 }
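/*
 * Worked example of the bounds check above (a sketch): on a filesystem
 * with sb_dblocks = 1000 and sb_agblocks = 250, an EFI extent whose
 * ext_start maps to startblock_fsb = 1200 or whose ext_len = 300 fails
 * the check, so the whole EFI is released rather than risking a free
 * of blocks outside the filesystem.
 */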
3063
3064 /*
3065  * When this is called, all of the EFIs which did not have
3066  * corresponding EFDs should be in the AIL.  What we do now
3067  * is free the extents associated with each one.
3068  *
3069  * Since we process the EFIs in normal transactions, they
3070  * will be removed at some point after the commit.  This prevents
3071  * us from just walking down the list processing each one.
3072  * We'll use a flag in the EFI to skip those that we've already
3073  * processed and use the AIL iteration mechanism's generation
3074  * count to try to speed this up at least a bit.
3075  *
3076  * When we start, we know that the EFIs are the only things in
3077  * the AIL.  As we process them, however, other items are added
3078  * to the AIL.  Since everything added to the AIL must come after
3079  * everything already in the AIL, we stop processing as soon as
3080  * we see something other than an EFI in the AIL.
3081  */
3082 STATIC int
3083 xlog_recover_process_efis(
3084         xlog_t                  *log)
3085 {
3086         xfs_log_item_t          *lip;
3087         xfs_efi_log_item_t      *efip;
3088         int                     error = 0;
3089         struct xfs_ail_cursor   cur;
3090         struct xfs_ail          *ailp;
3091
3092         ailp = log->l_ailp;
3093         spin_lock(&ailp->xa_lock);
3094         lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3095         while (lip != NULL) {
3096                 /*
3097                  * We're done when we see something other than an EFI.
3098                  * There should be no EFIs left in the AIL now.
3099                  */
3100                 if (lip->li_type != XFS_LI_EFI) {
3101 #ifdef DEBUG
3102                         for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3103                                 ASSERT(lip->li_type != XFS_LI_EFI);
3104 #endif
3105                         break;
3106                 }
3107
3108                 /*
3109                  * Skip EFIs that we've already processed.
3110                  */
3111                 efip = (xfs_efi_log_item_t *)lip;
3112                 if (efip->efi_flags & XFS_EFI_RECOVERED) {
3113                         lip = xfs_trans_ail_cursor_next(ailp, &cur);
3114                         continue;
3115                 }
3116
3117                 spin_unlock(&ailp->xa_lock);
3118                 error = xlog_recover_process_efi(log->l_mp, efip);
3119                 spin_lock(&ailp->xa_lock);
3120                 if (error)
3121                         goto out;
3122                 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3123         }
3124 out:
3125         xfs_trans_ail_cursor_done(ailp, &cur);
3126         spin_unlock(&ailp->xa_lock);
3127         return error;
3128 }
3129
3130 /*
3131  * This routine performs a transaction to null out a bad inode pointer
3132  * in an agi unlinked inode hash bucket.
3133  */
3134 STATIC void
3135 xlog_recover_clear_agi_bucket(
3136         xfs_mount_t     *mp,
3137         xfs_agnumber_t  agno,
3138         int             bucket)
3139 {
3140         xfs_trans_t     *tp;
3141         xfs_agi_t       *agi;
3142         xfs_buf_t       *agibp;
3143         int             offset;
3144         int             error;
3145
3146         tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3147         error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3148                                   0, 0, 0);
3149         if (error)
3150                 goto out_abort;
3151
3152         error = xfs_read_agi(mp, tp, agno, &agibp);
3153         if (error)
3154                 goto out_abort;
3155
3156         agi = XFS_BUF_TO_AGI(agibp);
3157         agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3158         offset = offsetof(xfs_agi_t, agi_unlinked) +
3159                  (sizeof(xfs_agino_t) * bucket);
3160         xfs_trans_log_buf(tp, agibp, offset,
3161                           (offset + sizeof(xfs_agino_t) - 1));
3162
3163         error = xfs_trans_commit(tp, 0);
3164         if (error)
3165                 goto out_error;
3166         return;
3167
3168 out_abort:
3169         xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3170 out_error:
3171         xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: "
3172                         "failed to clear agi %d. Continuing.", agno);
3173         return;
3174 }
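/*
 * Worked example of the logged range above (a sketch, assuming the
 * usual 4-byte xfs_agino_t): for bucket 2 the first logged byte is at
 * offsetof(xfs_agi_t, agi_unlinked) + 2 * 4, and the logged range
 * covers exactly that one slot, i.e. offset .. offset + 3.
 */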
3175
3176 STATIC xfs_agino_t
3177 xlog_recover_process_one_iunlink(
3178         struct xfs_mount                *mp,
3179         xfs_agnumber_t                  agno,
3180         xfs_agino_t                     agino,
3181         int                             bucket)
3182 {
3183         struct xfs_buf                  *ibp;
3184         struct xfs_dinode               *dip;
3185         struct xfs_inode                *ip;
3186         xfs_ino_t                       ino;
3187         int                             error;
3188
3189         ino = XFS_AGINO_TO_INO(mp, agno, agino);
3190         error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3191         if (error)
3192                 goto fail;
3193
3194         /*
3195          * Get the on disk inode to find the next inode in the bucket.
3196          */
3197         error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
3198         if (error)
3199                 goto fail_iput;
3200
3201         ASSERT(ip->i_d.di_nlink == 0);
3202         ASSERT(ip->i_d.di_mode != 0);
3203
3204         /* setup for the next pass */
3205         agino = be32_to_cpu(dip->di_next_unlinked);
3206         xfs_buf_relse(ibp);
3207
3208         /*
3209          * Prevent any DMAPI event from being sent when the reference on
3210          * the inode is dropped.
3211          */
3212         ip->i_d.di_dmevmask = 0;
3213
3214         IRELE(ip);
3215         return agino;
3216
3217  fail_iput:
3218         IRELE(ip);
3219  fail:
3220         /*
3221          * We can't read in the inode this bucket points to, or this inode
3222          * is messed up.  Just ditch this bucket of inodes.  We will lose
3223          * some inodes and space, but at least we won't hang.
3224          *
3225          * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3226          * clear the inode pointer in the bucket.
3227          */
3228         xlog_recover_clear_agi_bucket(mp, agno, bucket);
3229         return NULLAGINO;
3230 }
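/*
 * Sketch of the per-bucket unlinked chain traversed here (inferred
 * from the code above and in xlog_recover_process_iunlinks() below):
 * agi_unlinked[bucket] holds the head agino, each on-disk inode's
 * di_next_unlinked points to the next entry, and the chain is
 * terminated by NULLAGINO:
 *
 *	agi_unlinked[b] -> inode A -> inode B -> ... -> NULLAGINO
 */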
3231
3232 /*
3233  * xlog_recover_process_iunlinks
3234  *
3235  * This is called during recovery to process any inodes which
3236  * we unlinked but did not free when the system crashed.  These
3237  * inodes will be on the lists in the AGI blocks.  What we do
3238  * here is scan all the AGIs and fully truncate and free any
3239  * inodes found on the lists.  Each inode is removed from the
3240  * lists when it has been fully truncated and is freed.  The
3241  * freeing of the inode and its removal from the list must be
3242  * atomic.
3243  */
3244 STATIC void
3245 xlog_recover_process_iunlinks(
3246         xlog_t          *log)
3247 {
3248         xfs_mount_t     *mp;
3249         xfs_agnumber_t  agno;
3250         xfs_agi_t       *agi;
3251         xfs_buf_t       *agibp;
3252         xfs_agino_t     agino;
3253         int             bucket;
3254         int             error;
3255         uint            mp_dmevmask;
3256
3257         mp = log->l_mp;
3258
3259         /*
3260          * Prevent any DMAPI event from being sent while in this function.
3261          */
3262         mp_dmevmask = mp->m_dmevmask;
3263         mp->m_dmevmask = 0;
3264
3265         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3266                 /*
3267                  * Find the agi for this ag.
3268                  */
3269                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3270                 if (error) {
3271                         /*
3272                          * AGI is b0rked. Don't process it.
3273                          *
3274                          * We should probably mark the filesystem as corrupt
3275                          * after we've recovered all the ag's we can....
3276                          */
3277                         continue;
3278                 }
3279                 agi = XFS_BUF_TO_AGI(agibp);
3280
3281                 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3282                         agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3283                         while (agino != NULLAGINO) {
3284                                 /*
3285                                  * Release the agi buffer so that it can
3286                                  * be acquired in the normal course of the
3287                                  * transaction to truncate and free the inode.
3288                                  */
3289                                 xfs_buf_relse(agibp);
3290
3291                                 agino = xlog_recover_process_one_iunlink(mp,
3292                                                         agno, agino, bucket);
3293
3294                                 /*
3295                                  * Reacquire the agi buffer and continue around
3296                                  * the loop. This should never fail as we know
3297                                  * the buffer was good earlier on.
3298                                  */
3299                                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3300                                 ASSERT(error == 0);
3301                                 agi = XFS_BUF_TO_AGI(agibp);
3302                         }
3303                 }
3304
3305                 /*
3306                  * Release the buffer for the current agi so we can
3307                  * go on to the next one.
3308                  */
3309                 xfs_buf_relse(agibp);
3310         }
3311
3312         mp->m_dmevmask = mp_dmevmask;
3313 }
3314
3315
3316 #ifdef DEBUG
3317 STATIC void
3318 xlog_pack_data_checksum(
3319         xlog_t          *log,
3320         xlog_in_core_t  *iclog,
3321         int             size)
3322 {
3323         int             i;
3324         __be32          *up;
3325         uint            chksum = 0;
3326
3327         up = (__be32 *)iclog->ic_datap;
3328         /* divide length by 4 to get # words */
3329         for (i = 0; i < (size >> 2); i++) {
3330                 chksum ^= be32_to_cpu(*up);
3331                 up++;
3332         }
3333         iclog->ic_header.h_chksum = cpu_to_be32(chksum);
3334 }
3335 #else
3336 #define xlog_pack_data_checksum(log, iclog, size)
3337 #endif
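/*
 * A self-contained sketch of the XOR-fold checksum computed by
 * xlog_pack_data_checksum() above.  The helper name is hypothetical
 * and the snippet is illustration only, using the same byte-order
 * helpers as the surrounding code:
 */
#if 0	/* illustration only */
static __be32
xlog_xor_fold(const __be32 *data, int bytes)
{
	uint	chksum = 0;
	int	i;

	for (i = 0; i < (bytes >> 2); i++)	/* bytes / 4 = # words */
		chksum ^= be32_to_cpu(data[i]);
	return cpu_to_be32(chksum);
}
#endif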
3338
3339 /*
3340  * Stamp cycle number in every block
3341  */
3342 void
3343 xlog_pack_data(
3344         xlog_t                  *log,
3345         xlog_in_core_t          *iclog,
3346         int                     roundoff)
3347 {
3348         int                     i, j, k;
3349         int                     size = iclog->ic_offset + roundoff;
3350         __be32                  cycle_lsn;
3351         xfs_caddr_t             dp;
3352
3353         xlog_pack_data_checksum(log, iclog, size);
3354
3355         cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
3356
3357         dp = iclog->ic_datap;
3358         for (i = 0; i < BTOBB(size) &&
3359                 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3360                 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
3361                 *(__be32 *)dp = cycle_lsn;
3362                 dp += BBSIZE;
3363         }
3364
3365         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3366                 xlog_in_core_2_t *xhdr = iclog->ic_data;
3367
3368                 for ( ; i < BTOBB(size); i++) {
3369                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3370                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3371                         xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp;
3372                         *(__be32 *)dp = cycle_lsn;
3373                         dp += BBSIZE;
3374                 }
3375
3376                 for (i = 1; i < log->l_iclog_heads; i++) {
3377                         xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
3378                 }
3379         }
3380 }
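/*
 * Worked example of the j/k indexing above (a sketch, assuming
 * XLOG_HEADER_CYCLE_SIZE / BBSIZE == 64): the cycle word of basic
 * block i = 130 in a v2 record is saved as
 *
 *	j = 130 / 64 = 2, k = 130 % 64 = 2
 *	xhdr[2].hic_xheader.xh_cycle_data[2]
 *
 * while blocks 0..63 are saved in the main header's h_cycle_data[].
 * xlog_unpack_data() below inverts exactly this mapping.
 */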
3381
3382 STATIC void
3383 xlog_unpack_data(
3384         xlog_rec_header_t       *rhead,
3385         xfs_caddr_t             dp,
3386         xlog_t                  *log)
3387 {
3388         int                     i, j, k;
3389
3390         for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3391                   i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3392                 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3393                 dp += BBSIZE;
3394         }
3395
3396         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3397                 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3398                 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3399                         j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3400                         k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3401                         *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
3402                         dp += BBSIZE;
3403                 }
3404         }
3405 }
3406
3407 STATIC int
3408 xlog_valid_rec_header(
3409         xlog_t                  *log,
3410         xlog_rec_header_t       *rhead,
3411         xfs_daddr_t             blkno)
3412 {
3413         int                     hlen;
3414
3415         if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
3416                 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3417                                 XFS_ERRLEVEL_LOW, log->l_mp);
3418                 return XFS_ERROR(EFSCORRUPTED);
3419         }
3420         if (unlikely(
3421             (!rhead->h_version ||
3422             (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3423                 xlog_warn("XFS: %s: unrecognised log version (%d).",
3424                         __func__, be32_to_cpu(rhead->h_version));
3425                 return XFS_ERROR(EIO);
3426         }
3427
3428         /* LR body must have data or it wouldn't have been written */
3429         hlen = be32_to_cpu(rhead->h_len);
3430         if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
3431                 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3432                                 XFS_ERRLEVEL_LOW, log->l_mp);
3433                 return XFS_ERROR(EFSCORRUPTED);
3434         }
3435         if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
3436                 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3437                                 XFS_ERRLEVEL_LOW, log->l_mp);
3438                 return XFS_ERROR(EFSCORRUPTED);
3439         }
3440         return 0;
3441 }
3442
3443 /*
3444  * Read the log from tail to head and process the log records found.
3445  * Handle the two cases where the tail and head are in the same cycle
3446  * and where the active portion of the log wraps around the end of
3447  * the physical log separately.  The pass parameter is passed through
3448  * to the routines called to process the data and is not looked at
3449  * here.
3450  */
3451 STATIC int
3452 xlog_do_recovery_pass(
3453         xlog_t                  *log,
3454         xfs_daddr_t             head_blk,
3455         xfs_daddr_t             tail_blk,
3456         int                     pass)
3457 {
3458         xlog_rec_header_t       *rhead;
3459         xfs_daddr_t             blk_no;
3460         xfs_caddr_t             offset;
3461         xfs_buf_t               *hbp, *dbp;
3462         int                     error = 0, h_size;
3463         int                     bblks, split_bblks;
3464         int                     hblks, split_hblks, wrapped_hblks;
3465         struct hlist_head       rhash[XLOG_RHASH_SIZE];
3466
3467         ASSERT(head_blk != tail_blk);
3468
3469         /*
3470          * Read the header of the tail block and get the iclog buffer size from
3471          * h_size.  Use this to tell how many sectors make up the log header.
3472          */
3473         if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3474                 /*
3475                  * When using variable length iclogs, read first sector of
3476                  * iclog header and extract the header size from it.  Get a
3477                  * new hbp that is the correct size.
3478                  */
3479                 hbp = xlog_get_bp(log, 1);
3480                 if (!hbp)
3481                         return ENOMEM;
3482
3483                 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3484                 if (error)
3485                         goto bread_err1;
3486
3487                 rhead = (xlog_rec_header_t *)offset;
3488                 error = xlog_valid_rec_header(log, rhead, tail_blk);
3489                 if (error)
3490                         goto bread_err1;
3491                 h_size = be32_to_cpu(rhead->h_size);
3492                 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3493                     (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3494                         hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3495                         if (h_size % XLOG_HEADER_CYCLE_SIZE)
3496                                 hblks++;
3497                         xlog_put_bp(hbp);
3498                         hbp = xlog_get_bp(log, hblks);
3499                 } else {
3500                         hblks = 1;
3501                 }
3502         } else {
3503                 ASSERT(log->l_sectBBsize == 1);
3504                 hblks = 1;
3505                 hbp = xlog_get_bp(log, 1);
3506                 h_size = XLOG_BIG_RECORD_BSIZE;
3507         }
3508
3509         if (!hbp)
3510                 return ENOMEM;
3511         dbp = xlog_get_bp(log, BTOBB(h_size));
3512         if (!dbp) {
3513                 xlog_put_bp(hbp);
3514                 return ENOMEM;
3515         }
3516
3517         memset(rhash, 0, sizeof(rhash));
3518         if (tail_blk <= head_blk) {
3519                 for (blk_no = tail_blk; blk_no < head_blk; ) {
3520                         error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3521                         if (error)
3522                                 goto bread_err2;
3523
3524                         rhead = (xlog_rec_header_t *)offset;
3525                         error = xlog_valid_rec_header(log, rhead, blk_no);
3526                         if (error)
3527                                 goto bread_err2;
3528
3529                         /* blocks in data section */
3530                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3531                         error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3532                                            &offset);
3533                         if (error)
3534                                 goto bread_err2;
3535
3536                         xlog_unpack_data(rhead, offset, log);
3537                         if ((error = xlog_recover_process_data(log,
3538                                                 rhash, rhead, offset, pass)))
3539                                 goto bread_err2;
3540                         blk_no += bblks + hblks;
3541                 }
3542         } else {
3543                 /*
3544                  * Perform recovery around the end of the physical log.
3545                  * When the head is not on the same cycle number as the tail,
3546                  * we can't do a sequential recovery as above.
3547                  */
3548                 blk_no = tail_blk;
3549                 while (blk_no < log->l_logBBsize) {
3550                         /*
3551                          * Check for header wrapping around physical end-of-log
3552                          */
3553                         offset = XFS_BUF_PTR(hbp);
3554                         split_hblks = 0;
3555                         wrapped_hblks = 0;
3556                         if (blk_no + hblks <= log->l_logBBsize) {
3557                                 /* Read header in one read */
3558                                 error = xlog_bread(log, blk_no, hblks, hbp,
3559                                                    &offset);
3560                                 if (error)
3561                                         goto bread_err2;
3562                         } else {
3563                                 /* This LR is split across physical log end */
3564                                 if (blk_no != log->l_logBBsize) {
3565                                         /* some data before physical log end */
3566                                         ASSERT(blk_no <= INT_MAX);
3567                                         split_hblks = log->l_logBBsize - (int)blk_no;
3568                                         ASSERT(split_hblks > 0);
3569                                         error = xlog_bread(log, blk_no,
3570                                                            split_hblks, hbp,
3571                                                            &offset);
3572                                         if (error)
3573                                                 goto bread_err2;
3574                                 }
3575
3576                                 /*
3577                                  * Note: this black magic still works with
3578                                  * large sector sizes (non-512) only because:
3579                                  * - we increased the buffer size originally
3580                                  *   by 1 sector giving us enough extra space
3581                                  *   for the second read;
3582                                  * - the log start is guaranteed to be sector
3583                                  *   aligned;
3584                                  * - we read the log end (LR header start)
3585                                  *   _first_, then the log start (LR header end)
3586                                  *   - order is important.
3587                                  */
3588                                 wrapped_hblks = hblks - split_hblks;
3589                                 error = XFS_BUF_SET_PTR(hbp,
3590                                                 offset + BBTOB(split_hblks),
3591                                                 BBTOB(hblks - split_hblks));
3592                                 if (error)
3593                                         goto bread_err2;
3594
3595                                 error = xlog_bread_noalign(log, 0,
3596                                                            wrapped_hblks, hbp);
3597                                 if (error)
3598                                         goto bread_err2;
3599
3600                                 error = XFS_BUF_SET_PTR(hbp, offset,
3601                                                         BBTOB(hblks));
3602                                 if (error)
3603                                         goto bread_err2;
3604                         }
3605                         rhead = (xlog_rec_header_t *)offset;
3606                         error = xlog_valid_rec_header(log, rhead,
3607                                                 split_hblks ? blk_no : 0);
3608                         if (error)
3609                                 goto bread_err2;
3610
3611                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3612                         blk_no += hblks;
3613
3614                         /* Read in data for log record */
3615                         if (blk_no + bblks <= log->l_logBBsize) {
3616                                 error = xlog_bread(log, blk_no, bblks, dbp,
3617                                                    &offset);
3618                                 if (error)
3619                                         goto bread_err2;
3620                         } else {
3621                                 /* This log record is split across the
3622                                  * physical end of log */
3623                                 offset = XFS_BUF_PTR(dbp);
3624                                 split_bblks = 0;
3625                                 if (blk_no != log->l_logBBsize) {
3626                                         /* some data is before the physical
3627                                          * end of log */
3628                                         ASSERT(!wrapped_hblks);
3629                                         ASSERT(blk_no <= INT_MAX);
3630                                         split_bblks =
3631                                                 log->l_logBBsize - (int)blk_no;
3632                                         ASSERT(split_bblks > 0);
3633                                         error = xlog_bread(log, blk_no,
3634                                                         split_bblks, dbp,
3635                                                         &offset);
3636                                         if (error)
3637                                                 goto bread_err2;
3638                                 }
3639
3640                                 /*
3641                                  * Note: this black magic still works with
3642                                  * large sector sizes (non-512) only because:
3643                                  * - we increased the buffer size originally
3644                                  *   by 1 sector giving us enough extra space
3645                                  *   for the second read;
3646                                  * - the log start is guaranteed to be sector
3647                                  *   aligned;
3648                                  * - we read the log end (LR header start)
3649                                  *   _first_, then the log start (LR header end)
3650                                  *   - order is important.
3651                                  */
3652                                 error = XFS_BUF_SET_PTR(dbp,
3653                                                 offset + BBTOB(split_bblks),
3654                                                 BBTOB(bblks - split_bblks));
3655                                 if (error)
3656                                         goto bread_err2;
3657
3658                                 error = xlog_bread_noalign(log, wrapped_hblks,
3659                                                 bblks - split_bblks,
3660                                                 dbp);
3661                                 if (error)
3662                                         goto bread_err2;
3663
3664                                 error = XFS_BUF_SET_PTR(dbp, offset, h_size);
3665                                 if (error)
3666                                         goto bread_err2;
3667                         }
3668                         xlog_unpack_data(rhead, offset, log);
3669                         if ((error = xlog_recover_process_data(log, rhash,
3670                                                         rhead, offset, pass)))
3671                                 goto bread_err2;
3672                         blk_no += bblks;
3673                 }
3674
3675                 ASSERT(blk_no >= log->l_logBBsize);
3676                 blk_no -= log->l_logBBsize;
3677
3678                 /* read first part of physical log */
3679                 while (blk_no < head_blk) {
3680                         error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3681                         if (error)
3682                                 goto bread_err2;
3683
3684                         rhead = (xlog_rec_header_t *)offset;
3685                         error = xlog_valid_rec_header(log, rhead, blk_no);
3686                         if (error)
3687                                 goto bread_err2;
3688
3689                         bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3690                         error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3691                                            &offset);
3692                         if (error)
3693                                 goto bread_err2;
3694
3695                         xlog_unpack_data(rhead, offset, log);
3696                         if ((error = xlog_recover_process_data(log, rhash,
3697                                                         rhead, offset, pass)))
3698                                 goto bread_err2;
3699                         blk_no += bblks + hblks;
3700                 }
3701         }
3702
3703  bread_err2:
3704         xlog_put_bp(dbp);
3705  bread_err1:
3706         xlog_put_bp(hbp);
3707         return error;
3708 }
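/*
 * Sketch of the wrapped-log case handled above (tail_blk > head_blk):
 * the active region runs from tail_blk to the physical end of the log
 * and wraps to block 0, so a record header or body may be split and is
 * read in two pieces into a single buffer, end-of-log piece first:
 *
 *	0           head_blk      tail_blk             l_logBBsize
 *	|...tail of split record|    |records...|start of split record|
 */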
3709
3710 /*
3711  * Do the recovery of the log.  We actually do this in two phases.
3712  * The two passes are necessary in order to implement the function
3713  * of cancelling a record written into the log.  The first pass
3714  * determines those things which have been cancelled, and the
3715  * second pass replays log items normally except for those which
3716  * have been cancelled.  The handling of the replay and cancellations
3717  * takes place in the log item type specific routines.
3718  *
3719  * The table of items which have cancel records in the log is allocated
3720  * and freed at this level, since only here do we know when all of
3721  * the log recovery has been completed.
3722  */
3723 STATIC int
3724 xlog_do_log_recovery(
3725         xlog_t          *log,
3726         xfs_daddr_t     head_blk,
3727         xfs_daddr_t     tail_blk)
3728 {
3729         int             error;
3730
3731         ASSERT(head_blk != tail_blk);
3732
3733         /*
3734          * First do a pass to find all of the cancelled buf log items.
3735          * Store them in the buf_cancel_table for use in the second pass.
3736          */
3737         log->l_buf_cancel_table =
3738                 (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
3739                                                  sizeof(xfs_buf_cancel_t*),
3740                                                  KM_SLEEP);
3741         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3742                                       XLOG_RECOVER_PASS1);
3743         if (error != 0) {
3744                 kmem_free(log->l_buf_cancel_table);
3745                 log->l_buf_cancel_table = NULL;
3746                 return error;
3747         }
3748         /*
3749          * Then do a second pass to actually recover the items in the log.
3750          * When it is complete free the table of buf cancel items.
3751          */
3752         error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3753                                       XLOG_RECOVER_PASS2);
3754 #ifdef DEBUG
3755         if (!error) {
3756                 int     i;
3757
3758                 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3759                         ASSERT(log->l_buf_cancel_table[i] == NULL);
3760         }
3761 #endif  /* DEBUG */
3762
3763         kmem_free(log->l_buf_cancel_table);
3764         log->l_buf_cancel_table = NULL;
3765
3766         return error;
3767 }
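/*
 * A sketch of the two-pass interplay (restating the comment above in
 * data-flow terms): pass 1 only populates l_buf_cancel_table from
 * cancel records; pass 2 replays items, and buffer replay consults
 * that table so cancelled buffers are skipped.  The table lives only
 * for the duration of these two calls, which is why it is allocated
 * and freed here.
 */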
3768
3769 /*
3770  * Do the actual recovery
3771  */
3772 STATIC int
3773 xlog_do_recover(
3774         xlog_t          *log,
3775         xfs_daddr_t     head_blk,
3776         xfs_daddr_t     tail_blk)
3777 {
3778         int             error;
3779         xfs_buf_t       *bp;
3780         xfs_sb_t        *sbp;
3781
3782         /*
3783          * First replay the images in the log.
3784          */
3785         error = xlog_do_log_recovery(log, head_blk, tail_blk);
3786         if (error) {
3787                 return error;
3788         }
3789
3790         XFS_bflush(log->l_mp->m_ddev_targp);
3791
3792         /*
3793          * If IO errors happened during recovery, bail out.
3794          */
3795         if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
3796                 return (EIO);
3797         }
3798
3799         /*
3800          * We now update the tail_lsn since much of the recovery has completed
3801          * and there may be space available to use.  If there were no extent
3802          * or iunlinks, we can free up the entire log and set the tail_lsn to
3803          * be the last_sync_lsn.  This was set in xlog_find_tail to be the
3804          * lsn of the last known good LR on disk.  If there are extent frees
3805          * or iunlinks they will have some entries in the AIL; so we look at
3806          * the AIL to determine how to set the tail_lsn.
3807          */
3808         xlog_assign_tail_lsn(log->l_mp);
3809
3810         /*
3811          * Now that we've finished replaying all buffer and inode
3812          * updates, re-read in the superblock.
3813          */
3814         bp = xfs_getsb(log->l_mp, 0);
3815         XFS_BUF_UNDONE(bp);
3816         ASSERT(!(XFS_BUF_ISWRITE(bp)));
3817         ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
3818         XFS_BUF_READ(bp);
3819         XFS_BUF_UNASYNC(bp);
3820         xfsbdstrat(log->l_mp, bp);
3821         error = xfs_iowait(bp);
3822         if (error) {
3823                 xfs_ioerror_alert("xlog_do_recover",
3824                                   log->l_mp, bp, XFS_BUF_ADDR(bp));
3825                 ASSERT(0);
3826                 xfs_buf_relse(bp);
3827                 return error;
3828         }
3829
3830         /* Convert superblock from on-disk format */
3831         sbp = &log->l_mp->m_sb;
3832         xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
3833         ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
3834         ASSERT(xfs_sb_good_version(sbp));
3835         xfs_buf_relse(bp);
3836
3837         /* We've re-read the superblock so re-initialize per-cpu counters */
3838         xfs_icsb_reinit_counters(log->l_mp);
3839
3840         xlog_recover_check_summary(log);
3841
3842         /* Normal transactions can now occur */
3843         log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3844         return 0;
3845 }
3846
3847 /*
3848  * Perform recovery and re-initialize some log variables in xlog_find_tail.
3849  *
3850  * Return error or zero.
3851  */
3852 int
3853 xlog_recover(
3854         xlog_t          *log)
3855 {
3856         xfs_daddr_t     head_blk, tail_blk;
3857         int             error;
3858
3859         /* find the tail of the log */
3860         if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
3861                 return error;
3862
3863         if (tail_blk != head_blk) {
3864                 /* There used to be a comment here:
3865                  *
3866                  * disallow recovery on read-only mounts.  note -- mount
3867                  * checks for ENOSPC and turns it into an intelligent
3868                  * error message.
3869                  * ...but this is no longer true.  Now, unless you specify
3870                  * NORECOVERY (in which case this function would never be
3871                  * called), we just go ahead and recover.  We do this all
3872                  * under the vfs layer, so we can get away with it unless
3873                  * the device itself is read-only, in which case we fail.
3874                  */
3875                 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3876                         return error;
3877                 }
3878
3879                 cmn_err(CE_NOTE,
3880                         "Starting XFS recovery on filesystem: %s (logdev: %s)",
3881                         log->l_mp->m_fsname, log->l_mp->m_logname ?
3882                         log->l_mp->m_logname : "internal");
3883
3884                 error = xlog_do_recover(log, head_blk, tail_blk);
3885                 log->l_flags |= XLOG_RECOVERY_NEEDED;
3886         }
3887         return error;
3888 }
3889
3890 /*
3891  * In the first part of recovery we replay inodes and buffers and build
3892  * up the list of extent free items which need to be processed.  Here
3893  * we process the extent free items and clean up the on disk unlinked
3894  * inode lists.  This is separated from the first part of recovery so
3895  * that the root and real-time bitmap inodes can be read in from disk in
3896  * between the two stages.  This is necessary so that we can free space
3897  * in the real-time portion of the file system.
3898  */
3899 int
3900 xlog_recover_finish(
3901         xlog_t          *log)
3902 {
3903         /*
3904          * Now we're ready to do the transactions needed for the
3905          * rest of recovery.  Start with completing all the extent
3906          * free intent records and then process the unlinked inode
3907          * lists.  At this point, we essentially run in normal mode
3908          * except that we're still performing recovery actions
3909          * rather than accepting new requests.
3910          */
3911         if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3912                 int     error;
3913                 error = xlog_recover_process_efis(log);
3914                 if (error) {
3915                         cmn_err(CE_ALERT,
3916                                 "Failed to recover EFIs on filesystem: %s",
3917                                 log->l_mp->m_fsname);
3918                         return error;
3919                 }
3920                 /*
3921                  * Sync the log to get all the EFIs out of the AIL.
3922                  * This isn't absolutely necessary, but it helps in
3923                  * case the unlink transactions would have problems
3924                  * pushing the EFIs out of the way.
3925                  */
3926                 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3927
3928                 xlog_recover_process_iunlinks(log);
3929
3930                 xlog_recover_check_summary(log);
3931
3932                 cmn_err(CE_NOTE,
3933                         "Ending XFS recovery on filesystem: %s (logdev: %s)",
3934                         log->l_mp->m_fsname, log->l_mp->m_logname ?
3935                         log->l_mp->m_logname : "internal");
3936                 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3937         } else {
3938                 cmn_err(CE_DEBUG,
3939                         "!Ending clean XFS mount for filesystem: %s\n",
3940                         log->l_mp->m_fsname);
3941         }
3942         return 0;
3943 }
3944
3945
3946 #if defined(DEBUG)
3947 /*
3948  * Read all of the agf and agi counters and check that they
3949  * are consistent with the superblock counters.
3950  */
3951 void
3952 xlog_recover_check_summary(
3953         xlog_t          *log)
3954 {
3955         xfs_mount_t     *mp;
3956         xfs_agf_t       *agfp;
3957         xfs_buf_t       *agfbp;
3958         xfs_buf_t       *agibp;
3959         xfs_agnumber_t  agno;
3960         __uint64_t      freeblks;
3961         __uint64_t      itotal;
3962         __uint64_t      ifree;
3963         int             error;
3964
3965         mp = log->l_mp;
3966
3967         freeblks = 0LL;
3968         itotal = 0LL;
3969         ifree = 0LL;
3970         for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3971                 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3972                 if (error) {
3973                         xfs_fs_cmn_err(CE_ALERT, mp,
3974                                         "xlog_recover_check_summary(agf)"
3975                                         "agf read failed agno %d error %d",
3976                                                         agno, error);
3977                 } else {
3978                         agfp = XFS_BUF_TO_AGF(agfbp);
3979                         freeblks += be32_to_cpu(agfp->agf_freeblks) +
3980                                     be32_to_cpu(agfp->agf_flcount);
3981                         xfs_buf_relse(agfbp);
3982                 }
3983
3984                 error = xfs_read_agi(mp, NULL, agno, &agibp);
3985                 if (!error) {
3986                         struct xfs_agi  *agi = XFS_BUF_TO_AGI(agibp);
3987
3988                         itotal += be32_to_cpu(agi->agi_count);
3989                         ifree += be32_to_cpu(agi->agi_freecount);
3990                         xfs_buf_relse(agibp);
3991                 }
3992         }
3993 }
3994 #endif /* DEBUG */