@@ -73,7 +73,7 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
@@ -96,7 +96,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
 }
@@ -1207,7 +1207,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 			  size_t size, dma_addr_t *handle,
-			  enum dma_data_direction dir)
+			  enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t iova, iova_base;
@@ -1226,7 +1226,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-		if (!arch_is_coherent())
+		if (!arch_is_coherent() &&
+		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
 		ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1273,7 +1274,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
 			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-			    dir) < 0)
+			    dir, attrs) < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1286,7 +1287,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
@@ -1320,7 +1321,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		if (sg_dma_len(s))
 			__iommu_remove_mapping(dev, sg_dma_address(s),
 					       sg_dma_len(s));
-		if (!arch_is_coherent())
+		if (!arch_is_coherent() &&
+		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
 					      s->length, dir);
 	}
@@ -1382,7 +1384,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	dma_addr_t dma_addr;
 	int ret, len = PAGE_ALIGN(size + offset);
 
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	dma_addr = __alloc_iova(mapping, len);
@@ -1421,7 +1423,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!arch_is_coherent())
+	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
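
For reference, a minimal sketch of how a caller could exercise the new attribute through the generic DMA API of this era (the struct dma_attrs based interface). The device, scatterlist, and function names below are hypothetical and not part of this patch:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>
	#include <linux/scatterlist.h>

	/*
	 * Hypothetical caller: map a scatterlist while skipping the implicit
	 * cache maintenance that arm_dma_map_page()/__map_sg_chunk() would
	 * otherwise perform, then sync explicitly.
	 */
	static int example_map_nosync(struct device *dev, struct scatterlist *sg,
				      int nents)
	{
		DEFINE_DMA_ATTRS(attrs);
		int count;

		/* With this attribute set, the checks added above skip
		 * __dma_page_cpu_to_dev()/__dma_page_dev_to_cpu(). */
		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

		count = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attrs);
		if (count == 0)
			return -ENOMEM;

		/* The caller now owns cache maintenance and syncs only
		 * when the buffer is actually handed to the device. */
		dma_sync_sg_for_device(dev, sg, count, DMA_TO_DEVICE);
		return count;
	}

A matching dma_unmap_sg_attrs() call with the same attrs would likewise skip the CPU sync on unmap, which is the point of the attribute: buffers that stay mapped across many transfers pay for cache maintenance only on the ranges that actually change.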