@@ -266,19 +266,17 @@ extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 /*
  * Private functions
  */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
 #else
 static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
+	size_t size, enum dma_data_direction dir)
 {
 	return 1;
 }
 
 static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
+	size_t size, enum dma_data_direction dir)
 {
 	return 1;
 }
@@ -401,6 +399,33 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 	__dma_unmap_page(dev, handle, size, dir);
 }
 
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+
+	debug_dma_sync_single_for_cpu(dev, handle, size, dir);
+
+	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+
+	debug_dma_sync_single_for_device(dev, handle, size, dir);
+
+	if (!dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
+}
+
 /**
  * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -423,40 +448,14 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
+	dma_sync_single_for_cpu(dev, handle + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-	dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-	dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+	dma_sync_single_for_device(dev, handle + offset, size, dir);
 }
 
 /*
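
To illustrate the interface this patch reshuffles, here is a minimal, hypothetical driver fragment; the function name, device, and buffer are placeholders, and only the dma_* calls are the real APIs touched above. Note that after this change the _range_ variants are thin wrappers, so syncing a sub-range is the same as syncing at handle + offset.

/* Hypothetical usage sketch -- not part of the patch.  A streaming DMA
 * receive path: map once, then pass ownership of the buffer back and
 * forth between CPU and device with the sync calls reworked above. */
#include <linux/dma-mapping.h>
#include <linux/device.h>

static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map the buffer so the device can DMA into it. */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device writes into the buffer via DMA ... */

	/* Give the buffer back to the CPU before reading it.  With this
	 * patch, dma_sync_single_range_for_cpu(dev, handle, off, n, dir)
	 * is simply dma_sync_single_for_cpu(dev, handle + off, n, dir). */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU examines buf ... */

	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... more DMA, then tear down the mapping ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}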