@@ -600,6 +600,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 	vio_cmo_dealloc(viodev, alloc_size);
 }
 
+static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	return dma_iommu_ops.dma_supported(dev, mask);
+}
+
 struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc_coherent = vio_dma_iommu_alloc_coherent,
 	.free_coherent  = vio_dma_iommu_free_coherent,
@@ -607,6 +612,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
 	.unmap_sg       = vio_dma_iommu_unmap_sg,
 	.map_page       = vio_dma_iommu_map_page,
 	.unmap_page     = vio_dma_iommu_unmap_page,
+	.dma_supported  = vio_dma_iommu_dma_supported,
 
 };
 
@@ -858,8 +864,7 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
 
 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
 {
-	vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
-	viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
+	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
 }
 
 /**
@@ -1244,7 +1249,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
 	if (firmware_has_feature(FW_FEATURE_CMO))
 		vio_cmo_set_dma_ops(viodev);
 	else
-		viodev->dev.archdata.dma_ops = &dma_iommu_ops;
+		set_dma_ops(&viodev->dev, &dma_iommu_ops);
 	set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
 	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
 