
[ARM] Rename consistent_sync() as dma_cache_maint()

consistent_sync() is used to handle the cache maintenance issues with
DMA operations.  Since we've now removed the misuse of this function
from the two MTD drivers, rename it to prevent future misuse.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Russell King, 17 years ago
parent
commit
84aa462e2c
3 changed files with 11 additions and 11 deletions:
  1. arch/arm/common/dmabounce.c (+2, -2)
  2. arch/arm/mm/consistent.c (+2, -2)
  3. include/asm-arm/dma-mapping.h (+7, -7)
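
For context, a minimal sketch in kernel-style C, condensed from the include/asm-arm/dma-mapping.h hunks below. It shows the renamed declaration and how the inline dma_map_single() wrapper calls it on non-coherent platforms; arch_is_coherent(), virt_to_dma() and enum dma_data_direction come from the existing kernel headers and are assumed here, so this is not a complete header, just an illustration of the rename.

/* Renamed helper that performs CPU cache maintenance on a buffer
 * before/after DMA; called from the DMA mapping inlines below. */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	/* On non-coherent platforms, maintain the CPU cache for the
	 * buffer before handing its bus address to the device. */
	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, (unsigned long)cpu_addr);
}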

arch/arm/common/dmabounce.c  (+2, -2)

@@ -263,7 +263,7 @@ map_single(struct device *dev, void *ptr, size_t size,
 		 * We don't need to sync the DMA buffer since
 		 * it was allocated via the coherent allocators.
 		 */
-		consistent_sync(ptr, size, dir);
+		dma_cache_maint(ptr, size, dir);
 	}
 
 	return dma_addr;
@@ -383,7 +383,7 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		 * via the coherent allocators.
 		 */
 	} else {
-		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
+		dma_cache_maint(dma_to_virt(dev, dma_addr), size, dir);
 	}
 }
 

arch/arm/mm/consistent.c  (+2, -2)

@@ -481,7 +481,7 @@ core_initcall(consistent_init);
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-void consistent_sync(const void *start, size_t size, int direction)
+void dma_cache_maint(const void *start, size_t size, int direction)
 {
 	const void *end = start + size;
 
@@ -504,4 +504,4 @@ void consistent_sync(const void *start, size_t size, int direction)
 		BUG();
 	}
 }
-EXPORT_SYMBOL(consistent_sync);
+EXPORT_SYMBOL(dma_cache_maint);

include/asm-arm/dma-mapping.h  (+7, -7)

@@ -17,7 +17,7 @@
  * platforms with CONFIG_DMABOUNCE.
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
-extern void consistent_sync(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -165,7 +165,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	       enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync(cpu_addr, size, dir);
+		dma_cache_maint(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, (unsigned long)cpu_addr);
 }
@@ -278,7 +278,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		virt = page_address(sg->page) + sg->offset;
 
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 
 	return nents;
@@ -334,7 +334,7 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
@@ -342,7 +342,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			   enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
@@ -373,7 +373,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 }
 
@@ -386,7 +386,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 		if (!arch_is_coherent())
-			consistent_sync(virt, sg->length, dir);
+			dma_cache_maint(virt, sg->length, dir);
 	}
 }
 #else