@@ -630,6 +630,29 @@ xfs_buf_get_flags(
 	return NULL;
 }
 
+STATIC int
+_xfs_buf_read(
+	xfs_buf_t		*bp,
+	xfs_buf_flags_t		flags)
+{
+	int			status;
+
+	XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
+
+	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
+	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
+
+	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
+			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
+			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+
+	status = xfs_buf_iorequest(bp);
+	if (!status && !(flags & XBF_ASYNC))
+		status = xfs_buf_iowait(bp);
+	return status;
+}
+
 xfs_buf_t *
 xfs_buf_read_flags(
 	xfs_buftarg_t		*target,
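
As the hunk above shows, the new _xfs_buf_read() helper strips any stale write/delwri state from b_flags, applies only the read-related flags passed in, submits the I/O, and waits for completion unless XBF_ASYNC was requested. A hedged sketch of a synchronous caller; the wrapper function below is illustrative and not part of the patch:

STATIC int
example_sync_read(xfs_buf_t *bp)
{
	/* Caller must already have mapped bp to a disk address
	 * (the b_bn ASSERT in the helper).  With no XBF_ASYNC,
	 * _xfs_buf_read() blocks in xfs_buf_iowait() and returns
	 * the I/O status once the buffer is up to date. */
	return _xfs_buf_read(bp, XBF_READ);
}
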
@@ -646,7 +669,7 @@ xfs_buf_read_flags(
 	if (!XFS_BUF_ISDONE(bp)) {
 		XB_TRACE(bp, "read", (unsigned long)flags);
 		XFS_STATS_INC(xb_get_read);
-		xfs_buf_iostart(bp, flags);
+		_xfs_buf_read(bp, flags);
 	} else if (flags & XBF_ASYNC) {
 		XB_TRACE(bp, "read_async", (unsigned long)flags);
 		/*
@@ -1048,50 +1071,39 @@ xfs_buf_ioerror(
 	XB_TRACE(bp, "ioerror", (unsigned long)error);
 }
 
-/*
- * Initiate I/O on a buffer, based on the flags supplied.
- * The b_iodone routine in the buffer supplied will only be called
- * when all of the subsidiary I/O requests, if any, have been completed.
- */
 int
-xfs_buf_iostart(
-	xfs_buf_t		*bp,
-	xfs_buf_flags_t		flags)
+xfs_bawrite(
+	void			*mp,
+	struct xfs_buf		*bp)
 {
-	int			status = 0;
+	XB_TRACE(bp, "bawrite", 0);
 
-	XB_TRACE(bp, "iostart", (unsigned long)flags);
+	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
 
-	if (flags & XBF_DELWRI) {
-		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
-		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
-		xfs_buf_delwri_queue(bp, 1);
-		return 0;
-	}
+	xfs_buf_delwri_dequeue(bp);
 
-	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
-			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
-	bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
-			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+	bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
+	bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
+
+	bp->b_fspriv3 = mp;
+	bp->b_strat = xfs_bdstrat_cb;
+	return xfs_bdstrat_cb(bp);
+}
 
-	BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
+void
+xfs_bdwrite(
+	void			*mp,
+	struct xfs_buf		*bp)
+{
+	XB_TRACE(bp, "bdwrite", 0);
 
-	/* For writes allow an alternate strategy routine to precede
-	 * the actual I/O request (which may not be issued at all in
-	 * a shutdown situation, for example).
-	 */
-	status = (flags & XBF_WRITE) ?
-			xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
+	bp->b_strat = xfs_bdstrat_cb;
+	bp->b_fspriv3 = mp;
 
-	/* Wait for I/O if we are not an async request.
-	 * Note: async I/O request completion will release the buffer,
-	 * and that can already be done by this point. So using the
-	 * buffer pointer from here on, after async I/O, is invalid.
-	 */
-	if (!status && !(flags & XBF_ASYNC))
-		status = xfs_buf_iowait(bp);
+	bp->b_flags &= ~XBF_READ;
+	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
 
-	return status;
+	xfs_buf_delwri_queue(bp, 1);
 }
 
 STATIC_INLINE void
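
The removed xfs_buf_iostart() multiplexed reads, delayed writes and immediate writes behind one flags argument; the hunk above replaces its two write modes with the dedicated xfs_bawrite() and xfs_bdwrite() entry points. A hedged sketch of how a caller might choose between them; the wrapper below is illustrative and not part of the patch, while mp and bp are the parameters the new helpers take (mp being the cookie they stash in b_fspriv3 for xfs_bdstrat_cb()):

STATIC int
example_writeback(void *mp, struct xfs_buf *bp, int delay)
{
	if (delay) {
		/* Delayed write: mark the buffer XBF_DELWRI and put it
		 * on the delwri queue; no I/O is issued here (the old
		 * XBF_DELWRI branch of xfs_buf_iostart()). */
		xfs_bdwrite(mp, bp);
		return 0;
	}
	/* Immediate async write: pull the buffer off the delwri queue
	 * and push it through xfs_bdstrat_cb(); the submission status
	 * is returned (the old XBF_WRITE | XBF_ASYNC path). */
	return xfs_bawrite(mp, bp);
}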