@@ -17,50 +17,9 @@ extern int iommu_merge;
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
 
-struct dma_mapping_ops {
-	int (*mapping_error)(struct device *dev,
-			dma_addr_t dma_addr);
-	void* (*alloc_coherent)(struct device *dev, size_t size,
-			dma_addr_t *dma_handle, gfp_t gfp);
-	void (*free_coherent)(struct device *dev, size_t size,
-			void *vaddr, dma_addr_t dma_handle);
-	void (*sync_single_for_cpu)(struct device *hwdev,
-			dma_addr_t dma_handle, size_t size,
-			int direction);
-	void (*sync_single_for_device)(struct device *hwdev,
-			dma_addr_t dma_handle, size_t size,
-			int direction);
-	void (*sync_single_range_for_cpu)(struct device *hwdev,
-			dma_addr_t dma_handle, unsigned long offset,
-			size_t size, int direction);
-	void (*sync_single_range_for_device)(struct device *hwdev,
-			dma_addr_t dma_handle, unsigned long offset,
-			size_t size, int direction);
-	void (*sync_sg_for_cpu)(struct device *hwdev,
-			struct scatterlist *sg, int nelems,
-			int direction);
-	void (*sync_sg_for_device)(struct device *hwdev,
-			struct scatterlist *sg, int nelems,
-			int direction);
-	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
-			int nents, int direction);
-	void (*unmap_sg)(struct device *hwdev,
-			struct scatterlist *sg, int nents,
-			int direction);
-	dma_addr_t (*map_page)(struct device *dev, struct page *page,
-			unsigned long offset, size_t size,
-			enum dma_data_direction dir,
-			struct dma_attrs *attrs);
-	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
-			size_t size, enum dma_data_direction dir,
-			struct dma_attrs *attrs);
-	int (*dma_supported)(struct device *hwdev, u64 mask);
-	int is_phys;
-};
-
-extern struct dma_mapping_ops *dma_ops;
-
-static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
+extern struct dma_map_ops *dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 #ifdef CONFIG_X86_32
 	return dma_ops;
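The x86-private ops table removed above is replaced by the cross-architecture struct dma_map_ops, which this header now presumably picks up via linux/dma-mapping.h (an assumption: its definition is not part of this patch). Reconstructed from the call sites in the hunks below, the generic table looks roughly like this abridged sketch, not an authoritative definition:

	struct dma_map_ops {
		void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
		void (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
		dma_addr_t (*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs);
		void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction dir,
				struct dma_attrs *attrs);
		int (*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				struct dma_attrs *attrs);
		void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				struct dma_attrs *attrs);
		/* sync_single_* and sync_sg_* hooks elided; per the callers
		 * below they also take enum dma_data_direction now. */
		int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
		int (*dma_supported)(struct device *dev, u64 mask);
		int is_phys;
	};

Compared with the removed x86 struct, every hook takes enum dma_data_direction instead of a bare int, and the page/scatterlist hooks carry a struct dma_attrs * for attribute-aware implementations.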
@@ -75,7 +34,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	if (ops->mapping_error)
 		return ops->mapping_error(dev, dma_addr);
 
@@ -94,138 +53,139 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
 static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
-		int direction)
+		enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	return ops->map_page(hwdev, virt_to_page(ptr),
 			(unsigned long)ptr & ~PAGE_MASK, size,
-			direction, NULL);
+			dir, NULL);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		int direction)
+		enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, direction, NULL);
+		ops->unmap_page(dev, addr, size, dir, NULL);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-		int nents, int direction)
+		int nents, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_sg(hwdev, sg, nents, direction);
+	BUG_ON(!valid_dma_direction(dir));
+	return ops->map_sg(hwdev, sg, nents, dir, NULL);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-		int direction)
+		enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, direction);
+		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-		size_t size, int direction)
+		size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-		size_t size, int direction)
+		size_t size, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
+		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size, int direction)
+		unsigned long offset, size_t size,
+		enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_cpu)
 		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-				size, direction);
+				size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 		unsigned long offset, size_t size,
-		int direction)
+		enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_range_for_device)
 		ops->sync_single_range_for_device(hwdev, dma_handle,
-				offset, size, direction);
+				offset, size, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		int nelems, int direction)
+		int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
 	flush_write_buffers();
 }
 
 static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		int nelems, int direction)
+		int nelems, enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+	struct dma_map_ops *ops = get_dma_ops(hwdev);
 
-	BUG_ON(!valid_dma_direction(direction));
+	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
 
 	flush_write_buffers();
 }
 
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 		size_t offset, size_t size,
-		int direction)
+		enum dma_data_direction dir)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	BUG_ON(!valid_dma_direction(direction));
-	return ops->map_page(dev, page, offset, size, direction, NULL);
+	BUG_ON(!valid_dma_direction(dir));
+	return ops->map_page(dev, page, offset, size, dir, NULL);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-		size_t size, int direction)
+		size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, addr, size, direction);
+	dma_unmap_single(dev, addr, size, dir);
 }
 
 static inline void
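The wrappers above keep their names and argument order; only the direction type changes from int to enum dma_data_direction, and the sg wrappers now pass a NULL struct dma_attrs * to match the generic ops signatures. Typical callers should be unaffected, since they already passed the DMA_TO_DEVICE/DMA_FROM_DEVICE enumerators. A minimal sketch (hypothetical driver code; dev, buf, and len are made up for illustration):

	dma_addr_t handle;

	/* Map a kernel buffer for a device read; direction enumerators
	 * are members of enum dma_data_direction, so this compiles
	 * identically before and after the conversion. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... start the transfer and wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);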
@@ -271,7 +231,7 @@ static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -297,7 +257,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 static inline void dma_free_coherent(struct device *dev, size_t size,
 		void *vaddr, dma_addr_t bus)
 {
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	WARN_ON(irqs_disabled());	/* for portability */
 
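The coherent-allocation path changes only in the type of the ops pointer, so callers look the same before and after. A minimal usage sketch (hypothetical driver code; dev is made up for illustration):

	dma_addr_t dma_handle;
	void *cpu_addr;

	/* One page of coherent memory: cpu_addr is for CPU access,
	 * dma_handle is the bus address handed to the device. */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;
	/* ... program the device with dma_handle ... */
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);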