瀏覽代碼

[ARM] dma: add validation of DMA params

Validate the direction argument like x86 does.  In addition,
validate the dma_unmap_* parameters against those passed to
dma_map_* when using the DMA bounce code.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Russell King 16 年之前
父節點
當前提交
0e18b5d7c6
共有 2 個文件被更改,包括 15 次插入和 4 次删除
  1. 7 4
      arch/arm/common/dmabounce.c
  2. 8 0
      arch/arm/include/asm/dma-mapping.h

+ 7 - 4
arch/arm/common/dmabounce.c

@@ -289,6 +289,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	if (buf) {
 		BUG_ON(buf->size != size);
+		BUG_ON(buf->direction != dir);
 
 		dev_dbg(dev,
 			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -334,7 +335,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, ptr, size, dir);
 
-	BUG_ON(dir == DMA_NONE);
+	BUG_ON(!valid_dma_direction(dir));
 
 	return map_single(dev, ptr, size, dir);
 }
@@ -346,7 +347,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);
 
-	BUG_ON(dir == DMA_NONE);
+	BUG_ON(!valid_dma_direction(dir));
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
@@ -365,8 +366,6 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
 		__func__, (void *) dma_addr, size, dir);
 
-	BUG_ON(dir == DMA_NONE);
-
 	unmap_single(dev, dma_addr, size, dir);
 }
 EXPORT_SYMBOL(dma_unmap_single);
@@ -383,6 +382,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	BUG_ON(buf->direction != dir);
+
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
@@ -410,6 +411,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	BUG_ON(buf->direction != dir);
+
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);

+ 8 - 0
arch/arm/include/asm/dma-mapping.h

@@ -277,6 +277,8 @@ int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
 
@@ -301,6 +303,8 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!arch_is_coherent())
 		dma_cache_maint(page_address(page) + offset, size, dir);
 
@@ -370,6 +374,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
 
@@ -381,6 +387,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
+
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;