@@ -215,21 +215,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_is_consistent(d, h)	(1)
 #endif
 
-static inline int dma_get_cache_alignment(void)
-{
-#ifdef CONFIG_PPC64
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << INTERNODE_CACHE_SHIFT);
-#else
-	/*
-	 * Each processor family will define its own L1_CACHE_SHIFT,
-	 * L1_CACHE_BYTES wraps to this, so this is always safe.
-	 */
-	return L1_CACHE_BYTES;
-#endif
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {