dma-mapping.h

#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

struct dma_mapping_ops {
        int (*mapping_error)(struct device *dev,
                             dma_addr_t dma_addr);
        void *(*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                 size_t size, int direction);
        void (*unmap_single)(struct device *dev, dma_addr_t addr,
                             size_t size, int direction);
        void (*sync_single_for_cpu)(struct device *hwdev,
                                    dma_addr_t dma_handle, size_t size,
                                    int direction);
        void (*sync_single_for_device)(struct device *hwdev,
                                       dma_addr_t dma_handle, size_t size,
                                       int direction);
        void (*sync_single_range_for_cpu)(struct device *hwdev,
                                          dma_addr_t dma_handle,
                                          unsigned long offset,
                                          size_t size, int direction);
        void (*sync_single_range_for_device)(struct device *hwdev,
                                             dma_addr_t dma_handle,
                                             unsigned long offset,
                                             size_t size, int direction);
        void (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void (*sync_sg_for_device)(struct device *hwdev,
                                   struct scatterlist *sg, int nelems,
                                   int direction);
        int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                      int nents, int direction);
        void (*unmap_sg)(struct device *hwdev,
                         struct scatterlist *sg, int nents,
                         int direction);
        int (*dma_supported)(struct device *hwdev, u64 mask);
        int is_phys;
};

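/*
 * Illustrative sketch (hypothetical code, not part of this header): each
 * IOMMU backend (nommu, swiotlb, GART, ...) fills in one of these tables
 * and publishes it through dma_ops. Callbacks left NULL are simply skipped
 * by the inline wrappers below. All names here are made up:
 *
 *      static struct dma_mapping_ops my_iommu_ops = {
 *              .map_single   = my_map_single,
 *              .unmap_single = my_unmap_single,
 *              .map_sg       = my_map_sg,
 *              .is_phys      = 0,
 *      };
 */
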
extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        /* Prefer the device's own ops; fall back to the global table. */
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif /* CONFIG_X86_32 */
}

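/*
 * Illustrative sketch (hypothetical backend code): on X86_64 an IOMMU
 * driver can install per-device ops, which get_dma_ops() prefers over the
 * global table:
 *
 *      dev->archdata.dma_ops = &my_iommu_ops;
 */
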
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_64
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);
#endif

        return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_single)
                ops->unmap_single(dev, addr, size, direction);
}

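/*
 * Illustrative usage sketch (hypothetical driver code, not part of this
 * header): map a buffer for a CPU-to-device transfer, check the mapping
 * before handing the bus address to hardware, and unmap once the device
 * is done with it:
 *
 *      dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -EIO;
 *      ... tell the device to read len bytes at dma ...
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
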
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, direction);
}

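/*
 * Illustrative scatter-gather sketch (hypothetical driver code):
 * dma_map_sg() may coalesce entries, so program the device from its
 * return value, not from the original nents; unmap with the original
 * nents:
 *
 *      struct scatterlist sg[2], *s;
 *      int i, count;
 *
 *      sg_init_table(sg, 2);
 *      sg_set_buf(&sg[0], buf0, len0);
 *      sg_set_buf(&sg[1], buf1, len1);
 *      count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *      for_each_sg(sg, s, count, i)
 *              ... program sg_dma_address(s) / sg_dma_len(s) ...
 *      dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */
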
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size,
                                            direction);
        flush_write_buffers();
}

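/*
 * Illustrative sync sketch (hypothetical driver code): for a long-lived
 * streaming mapping, pass ownership back and forth instead of remapping:
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      ... the CPU may now inspect the buffer ...
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      ... the device may DMA into it again ...
 */
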
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

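/*
 * The same ownership protocol applies to scatterlists: call
 * dma_sync_sg_for_cpu() with the nelems originally passed to dma_map_sg()
 * before the CPU touches the buffers, and dma_sync_sg_for_device() before
 * the device uses them again.
 */
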
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(dev, page_to_phys(page) + offset,
                               size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

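/*
 * Illustrative page-mapping sketch (hypothetical driver code):
 * dma_map_page() takes a struct page plus offset rather than a kernel
 * virtual address, so it also works for highmem pages that have no
 * permanent kernel mapping:
 *
 *      dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
 *                                    DMA_FROM_DEVICE);
 *      ...
 *      dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 */
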
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /*
         * No easy way to get cache size on all x86, so return the
         * maximum possible, to be safe.
         */
        return boot_cpu_data.x86_clflush_size;
}

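/* On current x86 parts x86_clflush_size is typically 64 bytes. */
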
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_24BIT_MASK)
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}

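/*
 * Worked example of the two helpers above, following this header's logic:
 * a device whose coherent_dma_mask is DMA_24BIT_MASK gets GFP_DMA; on
 * X86_64 a 32-bit mask gets GFP_DMA32 instead; a device with no coherent
 * mask set is assumed 32-bit capable, or 24-bit if the caller already
 * passed GFP_DMA.
 */
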
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory;

        /* Zone flags are recomputed from the coherent mask below. */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        return ops->alloc_coherent(dev, size, dma_handle,
                                   dma_alloc_coherent_gfp_flags(dev, gfp));
}

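/*
 * Illustrative usage sketch (hypothetical driver code): coherent memory
 * is visible to CPU and device at the same time, so no sync calls are
 * needed for it:
 *
 *      dma_addr_t dma;
 *      void *desc = dma_alloc_coherent(dev, ring_size, &dma, GFP_KERNEL);
 *      if (!desc)
 *              return -ENOMEM;
 *      ... fill descriptors through desc, point the hardware at dma ...
 *      dma_free_coherent(dev, ring_size, desc, dma);
 */
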
static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */