@@ -17,7 +17,8 @@ extern int panic_on_overflow;
 extern int force_iommu;
 
 struct dma_mapping_ops {
-	int             (*mapping_error)(dma_addr_t dma_addr);
+	int             (*mapping_error)(struct device *dev,
+					 dma_addr_t dma_addr);
 	void*           (*alloc_coherent)(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp);
 	void            (*free_coherent)(struct device *dev, size_t size,
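
The hunk above changes the ->mapping_error hook itself: implementations now receive the struct device as well as the DMA address, so a per-device backend can consult the device when deciding whether a mapping failed. As a minimal sketch of what a hook looks like under the new signature (the name is hypothetical, not part of this patch):

	/* Hypothetical backend hook under the new two-argument signature. */
	static int example_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		/* dev is unused by a direct-mapping backend, but an
		 * IOMMU backend may inspect it. */
		return dma_addr == bad_dma_address;
	}
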
@@ -56,14 +57,32 @@ struct dma_mapping_ops {
 	int             is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+	return dma_ops;
+#else
+	if (unlikely(!dev) || !dev->archdata.dma_ops)
+		return dma_ops;
+	else
+		return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+	return 0;
+#else
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	if (ops->mapping_error)
+		return ops->mapping_error(dev, dma_addr);
 
 	return (dma_addr == bad_dma_address);
+#endif
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
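
After this hunk, dma_mapping_error() takes the device as its first argument and, on x86_64, dispatches through the per-device ops returned by the new get_dma_ops() helper. Callers therefore change too; a hedged driver-side sketch of the new calling convention (variable names are illustrative):

	/* Hypothetical caller: check a mapping with the two-argument form. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* never hand a failed mapping to hardware */
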
@@ -83,44 +102,53 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
 	       int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
 		 int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_single)
-		dma_ops->unmap_single(dev, addr, size, direction);
+	if (ops->unmap_single)
+		ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
 	   int nents, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
+	return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 	     int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->unmap_sg)
-		dma_ops->unmap_sg(hwdev, sg, nents, direction);
+	if (ops->unmap_sg)
+		ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_cpu)
-		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-					     direction);
+	if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
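
The remaining hunks are mechanical: each inline wrapper first resolves ops via get_dma_ops() instead of dereferencing the global dma_ops, so a device with dev->archdata.dma_ops set (x86_64 only) is routed to its own implementation. A hedged sketch of how per-device ops might be installed, assuming a backend-provided ops table (my_iommu_dma_ops is hypothetical):

	/* Hypothetical: route one device through a custom ops table. */
	extern struct dma_mapping_ops my_iommu_dma_ops;

	dev->archdata.dma_ops = &my_iommu_dma_ops;	/* honoured by get_dma_ops() */
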
@@ -128,10 +156,11 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
 			   size_t size, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_for_device)
-		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-						direction);
+	if (ops->sync_single_for_device)
+		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
 	flush_write_buffers();
 }
 
@@ -139,11 +168,12 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
 			      unsigned long offset, size_t size, int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_cpu)
-		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-						   size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_cpu)
+		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+					       size, direction);
 	flush_write_buffers();
 }
 
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
 				 unsigned long offset, size_t size,
 				 int direction)
 {
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_single_range_for_device)
-		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-						      offset, size, direction);
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+	BUG_ON(!valid_dma_direction(direction));
+	if (ops->sync_single_range_for_device)
+		ops->sync_single_range_for_device(hwdev, dma_handle,
+						  offset, size, direction);
 	flush_write_buffers();
 }
 
@@ -164,9 +195,11 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 		    int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_cpu)
-		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
 
@@ -174,9 +207,11 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 		       int nelems, int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device)
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+	if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 
 	flush_write_buffers();
 }
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 				      size_t offset, size_t size,
 				      int direction)
 {
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_single(dev, page_to_phys(page)+offset,
-				   size, direction);
+	return ops->map_single(dev, page_to_phys(page) + offset,
+			       size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,