@@ -33,6 +33,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 #endif
 }
 
+#include <asm-generic/dma-mapping-common.h>
+
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
@@ -53,177 +55,6 @@ extern int dma_set_mask(struct device *dev, u64 mask);
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag);
 
-static inline dma_addr_t
-dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	kmemcheck_mark_initialized(ptr, size);
-	addr = ops->map_page(hwdev, virt_to_page(ptr),
-			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     dir, NULL);
-	debug_dma_map_page(hwdev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-	return addr;
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-	   int nents, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-	int ents;
-	struct scatterlist *s;
-	int i;
-
-	BUG_ON(!valid_dma_direction(dir));
-	for_each_sg(sg, s, nents, i)
-		kmemcheck_mark_initialized(sg_virt(s), s->length);
-	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
-	debug_dma_map_sg(hwdev, sg, nents, ents, dir);
-
-	return ents;
-}
-
-static inline void
-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_unmap_sg(hwdev, sg, nents, dir);
-	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
-	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
-	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_cpu)
-		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-					       size, dir);
-	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
-					    offset, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_device)
-		ops->sync_single_range_for_device(hwdev, dma_handle,
-						  offset, size, dir);
-	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
-					       offset, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
-	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
-	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
-
-	flush_write_buffers();
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	kmemcheck_mark_initialized(page_address(page) + offset, size);
-	addr = ops->map_page(dev, page, offset, size, dir, NULL);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction dir)