dma-mapping.h

/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc
 * (the pci and vio busses).
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
/*
 * On the 64-bit (CONFIG_PPC64) side, these calls are routed through the
 * device's struct dma_mapping_ops, obtained via get_dma_ops().
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        dma_ops->unmap_single(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nhwentries,
                                enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
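
/*
 * Illustrative sketch, not part of the original header: roughly how a
 * driver would use the scatter/gather calls above.  `dev', `sglist' and
 * `nents' are assumed to come from the caller, and program_device() is a
 * hypothetical stand-in for whatever tells the hardware where to transfer.
 *
 *	int i, count;
 *	struct scatterlist *sg;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for_each_sg(sglist, sg, count, i)
 *		program_device(sg_dma_address(sg), sg_dma_len(sg));
 *	... run the transfer ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */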
/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

extern unsigned long dma_direct_offset;
#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)        (1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
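
/*
 * Illustrative sketch, not part of the original header: a probe routine
 * would typically restrict the device's DMA addressing (here to 32 bits)
 * before mapping anything.  `dev' is assumed to be the device being probed.
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))
 *		return -EIO;
 */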
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle,
                                       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        return __dma_alloc_coherent(size, dma_handle, gfp);
#else
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
                gfp |= GFP_DMA;

        ret = (void *)__get_free_pages(gfp, get_order(size));
        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }
        return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
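
/*
 * Illustrative sketch, not part of the original header: allocating and
 * freeing a coherent descriptor ring.  `dev' and RING_BYTES are assumed
 * placeholders supplied by the driver.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, access ring from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */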
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(ptr, size, direction);
        return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
                                    size_t size,
                                    enum dma_data_direction direction)
{
        /* We do nothing. */
}
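
/*
 * Illustrative sketch, not part of the original header: a streaming
 * mapping for one transfer.  `dev', `buf' and `len' are assumed to come
 * from the caller.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... start the transfer and wait for it to finish ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */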
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync_page(page, offset, size, direction);
        return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        /* We do nothing. */
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(direction == DMA_NONE);

        for_each_sg(sgl, sg, nents, i) {
                BUG_ON(!sg->page);
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
                sg->dma_address = page_to_bus(sg->page) + sg->offset;
        }

        return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nhwentries,
                                enum dma_data_direction direction)
{
        /* We don't do anything here. */
}
#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
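
/*
 * Illustrative sketch, not part of the original header: a driver that
 * keeps a streaming mapping alive across transfers hands buffer ownership
 * back and forth with the sync calls.  `dev', `handle' and `len' are
 * assumed placeholders.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now look at the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */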
static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(direction == DMA_NONE);

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(direction == DMA_NONE);

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
        return (dma_addr == DMA_ERROR_CODE);
#else
        return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)         (0)
#else
#define dma_is_consistent(d, h)         (1)
#endif
static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
        /* no easy way to get cache size on all processors, so return
         * the maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
#else
        /*
         * Each processor family will define its own L1_CACHE_SHIFT,
         * L1_CACHE_BYTES wraps to this, so this is always safe.
         */
        return L1_CACHE_BYTES;
#endif
}
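
/*
 * Illustrative sketch, not part of the original header: rounding a buffer
 * length up to the reported alignment so a streaming mapping does not
 * share cache lines with unrelated data.  `len' is an assumed placeholder;
 * ALIGN() is the usual kernel rounding macro.
 *
 *	size_t padded = ALIGN(len, dma_get_cache_alignment());
 *	buf = kmalloc(padded, GFP_KERNEL);
 */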
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        /* just sync everything for now: syncing offset + size bytes from
         * the start of the buffer conservatively covers the requested
         * sub-range */
        dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        /* just sync everything for now */
        dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */