@@ -126,21 +126,15 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
 
 /*
  * Update on-disk file size now that data has been written to disk.
- *
- * This function does not block as blocking on the inode lock in IO completion
- * can lead to IO completion order dependency deadlocks.. If it can't get the
- * inode ilock it will return EAGAIN. Callers must handle this.
  */
-STATIC int
+STATIC void
 xfs_setfilesize(
-	xfs_ioend_t		*ioend)
+	struct xfs_ioend	*ioend)
 {
-	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 	xfs_fsize_t		isize;
 
-	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
-		return EAGAIN;
-
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	isize = xfs_ioend_new_eof(ioend);
 	if (isize) {
 		trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
@@ -149,7 +143,6 @@ xfs_setfilesize(
 	}
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	return 0;
 }
 
 /*
@@ -163,10 +156,12 @@ xfs_finish_ioend(
 	struct xfs_ioend	*ioend)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
+		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
+
 		if (ioend->io_type == IO_UNWRITTEN)
-			queue_work(xfsconvertd_workqueue, &ioend->io_work);
+			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
 		else if (xfs_ioend_is_append(ioend))
-			queue_work(xfsdatad_workqueue, &ioend->io_work);
+			queue_work(mp->m_data_workqueue, &ioend->io_work);
 		else
 			xfs_destroy_ioend(ioend);
 	}
@@ -207,23 +202,9 @@ xfs_end_io(
 	 * We might have to update the on-disk file size after extending
 	 * writes.
 	 */
-	error = xfs_setfilesize(ioend);
-	ASSERT(!error || error == EAGAIN);
-
+	xfs_setfilesize(ioend);
 done:
-	/*
-	 * If we didn't complete processing of the ioend, requeue it to the
-	 * tail of the workqueue for another attempt later. Otherwise destroy
-	 * it.
-	 */
-	if (error == EAGAIN) {
-		atomic_inc(&ioend->io_remaining);
-		xfs_finish_ioend(ioend);
-		/* ensure we don't spin on blocked ioends */
-		delay(1);
-	} else {
-		xfs_destroy_ioend(ioend);
-	}
+	xfs_destroy_ioend(ioend);
 }
 
 /*