@@ -5,6 +5,7 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
@@ -297,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
 		enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
 
 /*
@@ -327,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -344,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	addr = __dma_map_single(dev, cpu_addr, size, dir);
+	debug_dma_map_page(dev, virt_to_page(cpu_addr),
+			(unsigned long)cpu_addr & ~PAGE_MASK, size,
+			dir, addr, true);
 
-	return virt_to_dma(dev, cpu_addr);
+	return addr;
 }
 
 /**
@@ -368,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+	return addr;
 }
 
 /**
@@ -392,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, true);
+	__dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -412,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, false);
+	__dma_unmap_page(dev, handle, size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
@@ -441,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
 
@@ -453,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
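
For illustration only, not part of the patch: a minimal driver-side sketch of the
streaming DMA API these hunks instrument. The function name example_rx and the
dev/buf/len parameters are hypothetical; only the dma_* calls come from the API
patched above. With CONFIG_DMA_API_DEBUG enabled, the debug_dma_map_page(),
debug_dma_unmap_page() and debug_dma_sync_single_for_{cpu,device}() hooks added
here let dma-debug record each mapping and warn about leaked handles, double
unmaps, direction mismatches, or syncs on regions that were never mapped.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int example_rx(struct device *dev, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	/* Hand the buffer to the device; dma-debug logs the mapping. */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... program the device with 'handle' and wait for the DMA ... */

	/* Peek at the data mid-transfer; dma-debug checks both syncs. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... inspect buf ... */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* Return ownership to the CPU; dma-debug drops its tracking entry. */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	kfree(buf);
	return 0;
}

Skipping the dma_unmap_single() here would leave a stale entry in dma-debug's
tracking table, which is exactly the class of bug the debug_dma_* calls in this
patch exist to catch; on a CONFIG_DMA_API_DEBUG kernel such violations surface
as warnings and are counted under the dma-api debugfs directory.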