@@ -50,7 +50,6 @@ kmem_zone_t	*xfs_log_ticket_zone;
 	(off) += (bytes);}
 
 /* Local miscellaneous function prototypes */
-STATIC int	 xlog_bdstrat_cb(struct xfs_buf *);
 STATIC int	 xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket,
 				    xlog_in_core_t **, xfs_lsn_t *);
 STATIC xlog_t *  xlog_alloc_log(xfs_mount_t *mp,
@@ -987,35 +986,6 @@ xlog_iodone(xfs_buf_t *bp)
 }	/* xlog_iodone */
 
-/*
- * The bdstrat callback function for log bufs. This gives us a central
- * place to trap bufs in case we get hit by a log I/O error and need to
- * shutdown. Actually, in practice, even when we didn't get a log error,
- * we transition the iclogs to IOERROR state *after* flushing all existing
- * iclogs to disk. This is because we don't want anymore new transactions to be
- * started or completed afterwards.
- */
-STATIC int
-xlog_bdstrat_cb(struct xfs_buf *bp)
-{
-	xlog_in_core_t *iclog;
-
-	iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
-
-	if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) {
-		/* note for irix bstrat will need  struct bdevsw passed
-		 * Fix the following macro if the code ever is merged
-		 */
-		XFS_bdstrat(bp);
-		return 0;
-	}
-
-	XFS_BUF_ERROR(bp, EIO);
-	XFS_BUF_STALE(bp);
-	xfs_biodone(bp);
-	return XFS_ERROR(EIO);
-}
-
 /*
  * Return size of each in-core log record buffer.
  *
@@ -1158,7 +1128,6 @@ xlog_alloc_log(xfs_mount_t *mp,
 	if (!bp)
 		goto out_free_log;
 	XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
-	XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
 	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
 	ASSERT(XFS_BUF_ISBUSY(bp));
 	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
@@ -1196,7 +1165,6 @@ xlog_alloc_log(xfs_mount_t *mp,
 		if (!XFS_BUF_CPSEMA(bp))
 			ASSERT(0);
 		XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
-		XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
 		XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
 		iclog->ic_bp = bp;
 		iclog->ic_data = bp->b_addr;
@@ -1343,6 +1311,37 @@ xlog_grant_push_ail(xfs_mount_t *mp,
 	xfs_trans_ail_push(log->l_ailp, threshold_lsn);
 }	/* xlog_grant_push_ail */
 
+/*
+ * The bdstrat callback function for log bufs. This gives us a central
+ * place to trap bufs in case we get hit by a log I/O error and need to
+ * shutdown. Actually, in practice, even when we didn't get a log error,
+ * we transition the iclogs to IOERROR state *after* flushing all existing
+ * iclogs to disk. This is because we don't want anymore new transactions to be
+ * started or completed afterwards.
+ */
+STATIC int
+xlog_bdstrat(
+	struct xfs_buf		*bp)
+{
+	struct xlog_in_core	*iclog;
+
+	iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
+	if (iclog->ic_state & XLOG_STATE_IOERROR) {
+		XFS_BUF_ERROR(bp, EIO);
+		XFS_BUF_STALE(bp);
+		xfs_biodone(bp);
+		/*
+		 * It would seem logical to return EIO here, but we rely on
+		 * the log state machine to propagate I/O errors instead of
+		 * doing it here.
+		 */
+		return 0;
+	}
+
+	bp->b_flags |= _XBF_RUN_QUEUES;
+	xfs_buf_iorequest(bp);
+	return 0;
+}
 
 /*
  * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
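
For readers who want to see the trapping logic in isolation, here is a minimal user-space C sketch (not part of the patch) of the pattern the new xlog_bdstrat() implements: a single submission choke point that, once an I/O-error state has been latched, marks the buffer stale, completes it immediately, and still returns 0 so that error propagation is left to the log state machine rather than the submit path. All names here (log_buf, LOG_STATE_IOERROR, submit_io, complete_buf, log_bdstrat) are illustrative only, not XFS code.

#include <stdio.h>
#include <stdbool.h>

#define LOG_STATE_IOERROR	0x1	/* analogue of XLOG_STATE_IOERROR */

struct log_buf {
	unsigned int	state;		/* latched log/iclog state flags */
	bool		stale;		/* analogue of XFS_BUF_STALE() */
	int		error;		/* analogue of XFS_BUF_ERROR() */
};

/* Stand-ins for the real submission and completion paths. */
static void submit_io(struct log_buf *bp)
{
	(void)bp;
	printf("submitting buffer I/O\n");
}

static void complete_buf(struct log_buf *bp)
{
	printf("completing buffer, error=%d stale=%d\n", bp->error, bp->stale);
}

/*
 * Central submission choke point: if an error state has already been
 * latched, shoot the buffer down (stale + immediate completion) and
 * return 0, leaving error propagation to the state machine; otherwise
 * hand it to the normal submission path.
 */
static int log_bdstrat(struct log_buf *bp)
{
	if (bp->state & LOG_STATE_IOERROR) {
		bp->error = 5;		/* EIO */
		bp->stale = true;
		complete_buf(bp);
		return 0;
	}

	submit_io(bp);
	return 0;
}

int main(void)
{
	struct log_buf healthy = { 0 };
	struct log_buf shutdown = { .state = LOG_STATE_IOERROR };

	log_bdstrat(&healthy);		/* reaches submit_io() */
	log_bdstrat(&shutdown);		/* trapped and completed as stale */
	return 0;
}
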
@@ -1462,7 +1461,7 @@ xlog_sync(xlog_t *log,
 	 */
 	XFS_BUF_WRITE(bp);
 
-	if ((error = XFS_bwrite(bp))) {
+	if ((error = xlog_bdstrat(bp))) {
 		xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
 				  XFS_BUF_ADDR(bp));
 		return error;
@@ -1502,7 +1501,7 @@ xlog_sync(xlog_t *log,
 		/* account for internal log which doesn't start at block #0 */
 		XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
 		XFS_BUF_WRITE(bp);
-		if ((error = XFS_bwrite(bp))) {
+		if ((error = xlog_bdstrat(bp))) {
			xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
 					  bp, XFS_BUF_ADDR(bp));
 			return error;
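
A second self-contained sketch (again not part of the patch) shows the call-site pattern from the two xlog_sync() hunks above: submit through the single helper, and if the submission itself reports a failure, raise an alert and return the error. Note that the xlog_bdstrat() added by this patch returns 0 on both paths, so the stub below deliberately differs by being able to return EIO, purely to exercise the alert branch; log_bdstrat, ioerror_alert and sync_one_buffer are illustrative names, not XFS functions.

#include <errno.h>
#include <stdio.h>

struct log_buf {
	long long	blkno;		/* disk address, as in XFS_BUF_ADDR() */
	int		fail;		/* force a submission failure for the demo */
};

/* Stand-in for the submission helper; returns an errno on failure. */
static int log_bdstrat(struct log_buf *bp)
{
	return bp->fail ? EIO : 0;
}

/* Stand-in for xfs_ioerror_alert(): report which operation failed and where. */
static void ioerror_alert(const char *op, long long blkno)
{
	fprintf(stderr, "%s: I/O error writing block %lld\n", op, blkno);
}

/*
 * Caller pattern mirroring the xlog_sync() hunks: submit through the
 * single choke point, alert and bail out if submission reports an error.
 */
static int sync_one_buffer(struct log_buf *bp)
{
	int error = log_bdstrat(bp);

	if (error) {
		ioerror_alert("sync_one_buffer", bp->blkno);
		return error;
	}
	return 0;
}

int main(void)
{
	struct log_buf ok = { .blkno = 64 };
	struct log_buf bad = { .blkno = 128, .fail = 1 };

	sync_one_buffer(&ok);		/* succeeds silently */
	return sync_one_buffer(&bad);	/* triggers the alert path */
}
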