@@ -7,9 +7,9 @@
 #ifndef _BLACKFIN_DMA_MAPPING_H
 #define _BLACKFIN_DMA_MAPPING_H

-#include <asm/scatterlist.h>
+#include <asm/cacheflush.h>
+struct scatterlist;

-void dma_alloc_init(unsigned long start, unsigned long end);
 void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);
 void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
@@ -20,13 +20,51 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
  */
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_supported(d, m) (1)
+#define dma_get_cache_alignment() (32)
+#define dma_is_consistent(d, h) (1)

-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
 {
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
 	return 0;
 }

+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+extern void
+__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
+static inline void
+_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	if (!__builtin_constant_p(dir)) {
+		__dma_sync(addr, size, dir);
+		return;
+	}
+
+	switch (dir) {
+	case DMA_NONE:
+		BUG();
+	case DMA_TO_DEVICE:	/* writeback only */
+		flush_dcache_range(addr, addr + size);
+		break;
+	case DMA_FROM_DEVICE: /* invalidate only */
+	case DMA_BIDIRECTIONAL: /* flush and invalidate */
+		/* Blackfin has no dedicated invalidate (it includes a flush) */
+		invalidate_dcache_range(addr, addr + size);
+		break;
+	}
+}
+
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.
  * The 32-bit bus address to use is returned.
@@ -34,8 +72,13 @@ int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
  * Once the device is given the dma address, the device owns this memory
  * until either pci_unmap_single or pci_dma_sync_single is performed.
  */
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-				 enum dma_data_direction direction);
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction dir)
+{
+	_dma_sync((dma_addr_t)ptr, size, dir);
+	return (dma_addr_t) ptr;
+}

 static inline dma_addr_t
 dma_map_page(struct device *dev, struct page *page,
@@ -53,8 +96,12 @@ dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the cpu to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction direction);
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+}

 static inline void
 dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
@@ -80,38 +127,66 @@ dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
  * the same here.
  */
 extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		      enum dma_data_direction direction);
+		      enum dma_data_direction dir);

 /*
  * Unmap a set of streaming mode DMA translations.
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-			 int nhwentries, enum dma_data_direction direction);
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	     int nhwentries, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+}

-static inline void dma_sync_single_for_cpu(struct device *dev,
-					   dma_addr_t handle, size_t size,
-					   enum dma_data_direction dir)
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction dir)
 {
+	BUG_ON(!valid_dma_direction(dir));
 }

-static inline void dma_sync_single_for_device(struct device *dev,
-					      dma_addr_t handle, size_t size,
-					      enum dma_data_direction dir)
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction dir)
 {
+	_dma_sync(handle + offset, size, dir);
 }

-static inline void dma_sync_sg_for_cpu(struct device *dev,
-				       struct scatterlist *sg,
-				       int nents, enum dma_data_direction dir)
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+			enum dma_data_direction dir)
 {
+	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
 }

-static inline void dma_sync_sg_for_device(struct device *dev,
-					  struct scatterlist *sg,
-					  int nents, enum dma_data_direction dir)
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
+			   enum dma_data_direction dir)
+{
+	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+		    enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+}
+
+extern void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		       int nents, enum dma_data_direction dir);
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	       enum dma_data_direction dir)
 {
+	_dma_sync((dma_addr_t)vaddr, size, dir);
 }

 #endif /* _BLACKFIN_DMA_MAPPING_H */
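
For illustration only, and not part of the patch: a minimal sketch of how a driver might use the streaming helpers declared above on a non-coherent Blackfin device. The bfin_start_tx() and bfin_wait_tx() hooks are hypothetical placeholders; only the dma_* calls come from this header.

/* Hypothetical transmit path built on the streaming API above. */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Placeholder controller hooks, assumed to exist elsewhere in the driver. */
extern void bfin_start_tx(struct device *dev, dma_addr_t dma, size_t len);
extern void bfin_wait_tx(struct device *dev);

static int bfin_send(struct device *dev, const void *data, size_t len)
{
	dma_addr_t dma;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memcpy(buf, data, len);

	/*
	 * On this port, dma_map_single() boils down to _dma_sync(): for
	 * DMA_TO_DEVICE it writes the dcache range back so the device sees
	 * the data, and the returned handle is simply the buffer address.
	 */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -EIO;
	}

	bfin_start_tx(dev, dma, len);
	bfin_wait_tx(dev);

	/* No cache work needed for TO_DEVICE; unmap only sanity-checks dir. */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}

For receive, the buffer would be mapped with DMA_FROM_DEVICE, which invalidates the dcache range up front; dma_sync_single_for_cpu() does nothing in this patch, while the *_for_device variants redo the cache maintenance before the buffer is handed back to the device.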
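Similarly, a sketch of the coherent path, which exercises the dma_set_mask() added by this patch together with the existing dma_alloc_coherent()/dma_free_coherent() declarations; the bfin_ring structure and RING_BYTES size are made-up names, not from the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define RING_BYTES	4096		/* made-up descriptor ring size */

struct bfin_ring {			/* hypothetical driver state */
	void		*desc;
	dma_addr_t	desc_dma;
};

static int bfin_ring_init(struct device *dev, struct bfin_ring *ring)
{
	/*
	 * dma_set_mask() (added above) only records the mask, since
	 * dma_supported() unconditionally reports success on Blackfin.
	 */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	/* Coherent memory needs no _dma_sync() calls around device access. */
	ring->desc = dma_alloc_coherent(dev, RING_BYTES, &ring->desc_dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;
	return 0;
}

static void bfin_ring_exit(struct device *dev, struct bfin_ring *ring)
{
	dma_free_coherent(dev, RING_BYTES, ring->desc, ring->desc_dma);
}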