@@ -107,6 +107,18 @@ xfs_page_trace(
 #define xfs_page_trace(tag, inode, page, pgoff)
 #endif
 
+STATIC struct block_device *
+xfs_find_bdev_for_inode(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+
+	if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
+		return mp->m_rtdev_targp->bt_bdev;
+	else
+		return mp->m_ddev_targp->bt_bdev;
+}
+
 /*
  * Schedule IO completion handling on a xfsdatad if this was
  * the final hold on this ioend. If we are asked to wait,
@@ -1471,28 +1483,21 @@ xfs_vm_direct_IO(
 {
 	struct file	*file = iocb->ki_filp;
 	struct inode	*inode = file->f_mapping->host;
-	xfs_iomap_t	iomap;
-	int		maps = 1;
-	int		error;
+	struct block_device *bdev;
 	ssize_t		ret;
 
-	error = xfs_bmap(XFS_I(inode), offset, 0,
-				BMAPI_DEVICE, &iomap, &maps);
-	if (error)
-		return -error;
+	bdev = xfs_find_bdev_for_inode(XFS_I(inode));
 
 	if (rw == WRITE) {
 		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
 		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-			iomap.iomap_target->bt_bdev,
-			iov, offset, nr_segs,
+			bdev, iov, offset, nr_segs,
 			xfs_get_blocks_direct,
 			xfs_end_io_direct);
 	} else {
 		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
 		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
-			iomap.iomap_target->bt_bdev,
-			iov, offset, nr_segs,
+			bdev, iov, offset, nr_segs,
 			xfs_get_blocks_direct,
 			xfs_end_io_direct);
 	}