@@ -3,11 +3,48 @@
 
 #ifdef __KERNEL__
 
-#include <linux/mm.h>		/* need struct page */
-
+#include <linux/mm_types.h>
 #include <linux/scatterlist.h>
 
 #include <asm-generic/dma-coherent.h>
+#include <asm/memory.h>
+
+/*
+ * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
+ * used internally by the DMA-mapping API to provide DMA addresses. They
+ * must not be used by drivers.
+ */
+#ifndef __arch_page_to_dma
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+	return (void *)__bus_to_virt(addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
+}
+#else
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return __arch_page_to_dma(dev, page);
+}
+
+static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
+{
+	return __arch_dma_to_virt(dev, addr);
+}
+
+static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+{
+	return __arch_virt_to_dma(dev, addr);
+}
+#endif
 
 /*
  * DMA-consistent mapping functions.  These allocate/free a region of
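The #ifndef __arch_page_to_dma guard above lets a machine class supply its own virtual/bus translations by defining the three __arch_* hooks before this point (which is why <asm/memory.h> is now included first). A minimal sketch of such an override, assuming a hypothetical platform whose bus addresses sit at a constant offset from physical addresses; FOO_BUS_OFFSET and the file placement are invented for illustration, not taken from the patch:

	/* Hypothetical fragment of a platform's mach/memory.h. */
	#define FOO_BUS_OFFSET		0x80000000

	#define __arch_page_to_dma(dev, page)	\
		((dma_addr_t)(page_to_phys(page) + FOO_BUS_OFFSET))
	#define __arch_dma_to_virt(dev, addr)	\
		((void *)__phys_to_virt((addr) - FOO_BUS_OFFSET))
	#define __arch_virt_to_dma(dev, addr)	\
		((dma_addr_t)(__virt_to_phys((unsigned long)(addr)) + FOO_BUS_OFFSET))

Because the guard is a preprocessor test, the hooks must be macros visible here; plain functions of the same name would not be detected.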
@@ -169,7 +206,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
 	if (!arch_is_coherent())
 		dma_cache_maint(cpu_addr, size, dir);
 
-	return virt_to_dma(dev, (unsigned long)cpu_addr);
+	return virt_to_dma(dev, cpu_addr);
 }
 #else
 extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
@@ -195,7 +232,7 @@ dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size,
 	     enum dma_data_direction dir)
 {
-	return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
+	return dma_map_single(dev, page_address(page) + offset, size, dir);
 }
 
 /**
@@ -241,7 +278,7 @@ static inline void
 dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 	       enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, handle, size, (int)dir);
+	dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -336,7 +373,7 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint(dma_to_virt(dev, handle), size, dir);
 }
 
 static inline void
@@ -344,7 +381,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
 			enum dma_data_direction dir)
 {
 	if (!arch_is_coherent())
-		dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
+		dma_cache_maint(dma_to_virt(dev, handle), size, dir);
 }
 #else
 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
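For the driver-facing side, a minimal usage sketch of the streaming dma_map_single()/dma_unmap_single() pair whose casts the hunks above remove; the foo names are invented and error handling is elided. The type-safety point of the patch shows up here: the device is programmed with the dma_addr_t cookie returned by the mapping call, never with the kernel virtual address buf:

	#include <linux/dma-mapping.h>

	extern void foo_hw_start(dma_addr_t addr, size_t len);	/* hypothetical */

	static int foo_send(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* Map for device reads; on non-coherent ARM this cleans the cache. */
		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		foo_hw_start(handle, len);	/* hand the bus address to the device */

		/* ... wait for the transfer to complete ... */

		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}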