@@ -447,48 +447,25 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
 EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
-	size_t size, int direction)
+	size_t size, void (*op)(const void *, const void *))
 {
 	void *vaddr;
-	unsigned long paddr;
-	void (*inner_op)(const void *, const void *);
-	void (*outer_op)(unsigned long, unsigned long);
-
-	switch (direction) {
-	case DMA_FROM_DEVICE:		/* invalidate only */
-		inner_op = dmac_inv_range;
-		outer_op = outer_inv_range;
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		inner_op = dmac_clean_range;
-		outer_op = outer_clean_range;
-		break;
-	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		inner_op = dmac_flush_range;
-		outer_op = outer_flush_range;
-		break;
-	default:
-		BUG();
-	}
 
 	if (!PageHighMem(page)) {
 		vaddr = page_address(page) + offset;
-		inner_op(vaddr, vaddr + size);
+		op(vaddr, vaddr + size);
 	} else {
 		vaddr = kmap_high_get(page);
 		if (vaddr) {
 			vaddr += offset;
-			inner_op(vaddr, vaddr + size);
+			op(vaddr, vaddr + size);
 			kunmap_high(page);
 		}
 	}
-
-	paddr = page_to_phys(page) + offset;
-	outer_op(paddr, paddr + size);
 }
 
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
-	size_t size, int dir)
+	size_t size, void (*op)(const void *, const void *))
 {
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
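
With this hunk, dma_cache_maint_contiguous() no longer derives the cache
operations from a DMA direction: the caller hands in the inner-cache range
operation directly, and the outer-cache pass moves out of this helper
entirely. A minimal usage sketch of the new contract (illustrative only,
not part of the patch; it assumes the dmac_*_range helpers referenced
above):

	/* Clean (writeback) a region before the device reads it: */
	dma_cache_maint_contiguous(page, offset, len, dmac_clean_range);

	/* Invalidate before the CPU reads what the device wrote: */
	dma_cache_maint_contiguous(page, offset, len, dmac_inv_range);
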
@@ -506,7 +483,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			}
 			len = PAGE_SIZE - offset;
 		}
-		dma_cache_maint_contiguous(page, offset, len, dir);
+		dma_cache_maint_contiguous(page, offset, len, op);
 		offset = 0;
 		page++;
 		left -= len;
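
dma_cache_maint_page() is otherwise unchanged apart from forwarding `op`:
it still splits the request at page boundaries, because highmem pages have
to be mapped and maintained one at a time. For orientation, a hedged
sketch of the loop this hunk sits in, reconstructed from the visible
context lines (the handling of an initial offset beyond PAGE_SIZE is an
assumption, not shown in the hunk):

	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
			if (offset >= PAGE_SIZE) {
				/* assumed: step over whole pages first */
				page += offset / PAGE_SIZE;
				offset %= PAGE_SIZE;
			}
			len = PAGE_SIZE - offset;
		}
		dma_cache_maint_contiguous(page, offset, len, op);
		offset = 0;
		page++;
		left -= len;
	} while (left);
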
@@ -516,7 +493,31 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
-	dma_cache_maint_page(page, off, size, dir);
+	unsigned long paddr;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
+
+	switch (dir) {
+	case DMA_FROM_DEVICE:		/* invalidate only */
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
+		break;
+	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
+		break;
+	default:
+		BUG();
+	}
+
+	dma_cache_maint_page(page, off, size, inner_op);
+
+	paddr = page_to_phys(page) + off;
+	outer_op(paddr, paddr + size);
 }
 EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
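
Taken together, ___dma_page_cpu_to_dev() now owns the whole CPU-to-device
handover: it selects the inner and outer cache operations from the DMA
direction once, applies the inner operation page by page through
dma_cache_maint_page(), and finishes with a single outer-cache pass over
the physical range. A hedged sketch of a caller (the wrapper below is
illustrative, not from this patch; real callers sit in the dma-mapping
layer):

	/* Illustrative only: make one page visible to the device and
	 * return its physical address as the bus address. */
	static dma_addr_t example_map_page(struct page *page, unsigned long off,
			size_t size, enum dma_data_direction dir)
	{
		___dma_page_cpu_to_dev(page, off, size, dir);
		return page_to_phys(page) + off;
	}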