@@ -406,35 +406,31 @@ EXPORT_SYMBOL(dma_free_coherent);
  */
 static void dma_cache_maint(const void *start, size_t size, int direction)
 {
-	void (*inner_op)(const void *, const void *);
 	void (*outer_op)(unsigned long, unsigned long);
 
-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
-
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
 		outer_op = outer_clean_range;
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
 		outer_op = outer_flush_range;
 		break;
 	default:
 		BUG();
 	}
 
-	inner_op(start, start + size);
 	outer_op(__pa(start), __pa(start) + size);
 }
 
 void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
 	enum dma_data_direction dir)
 {
+	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+
+	dmac_map_area(kaddr, size, dir);
 	dma_cache_maint(kaddr, size, dir);
 }
 EXPORT_SYMBOL(___dma_single_cpu_to_dev);
@@ -442,12 +438,15 @@ EXPORT_SYMBOL(___dma_single_cpu_to_dev);
 void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
 	enum dma_data_direction dir)
 {
-	/* nothing to do */
+	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+
+	dmac_unmap_area(kaddr, size, dir);
 }
 EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, void (*op)(const void *, const void *))
+	size_t size, enum dma_data_direction dir,
+	void (*op)(const void *, size_t, int))
 {
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
@@ -471,12 +470,12 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			vaddr = kmap_high_get(page);
 			if (vaddr) {
 				vaddr += offset;
-				op(vaddr, vaddr + len);
+				op(vaddr, len, dir);
 				kunmap_high(page);
 			}
 		} else {
 			vaddr = page_address(page) + offset;
-			op(vaddr, vaddr + len);
+			op(vaddr, len, dir);
 		}
 		offset = 0;
 		page++;
@@ -488,27 +487,23 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr;
-	void (*inner_op)(const void *, const void *);
 	void (*outer_op)(unsigned long, unsigned long);
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
 		outer_op = outer_inv_range;
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
 		outer_op = outer_clean_range;
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
 		outer_op = outer_flush_range;
 		break;
 	default:
 		BUG();
 	}
 
-	dma_cache_maint_page(page, off, size, inner_op);
+	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
 
 	paddr = page_to_phys(page) + off;
 	outer_op(paddr, paddr + size);
@@ -518,7 +513,7 @@ EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 