@@ -18,7 +18,7 @@
 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
 {
-	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, device_to_mask(dev), flag,
				    dev_to_node(dev));
 }
@@ -26,7 +26,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
 {
-	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
 
 /* Creates TCEs for a user provided buffer. The user buffer must be
@@ -39,8 +39,8 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
 {
-	return iommu_map_page(dev, dev->archdata.dma_data, page, offset, size,
-			      device_to_mask(dev), direction, attrs);
+	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
+			      size, device_to_mask(dev), direction, attrs);
 }
 
 
@@ -48,7 +48,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
 {
-	iommu_unmap_page(dev->archdata.dma_data, dma_handle, size, direction,
+	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
			 attrs);
 }
 
@@ -57,7 +57,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
 {
-	return iommu_map_sg(dev, dev->archdata.dma_data, sglist, nelems,
+	return iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
			    device_to_mask(dev), direction, attrs);
 }
 
@@ -65,14 +65,14 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
 {
-	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction,
+	iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction,
		       attrs);
 }
 
 /* We support DMA to/from any memory page via the iommu */
 static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct iommu_table *tbl = dev->archdata.dma_data;
+	struct iommu_table *tbl = get_iommu_table_base(dev);
 
 	if (!tbl || tbl->it_offset > mask) {
		printk(KERN_INFO