dma-mapping.h

#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}
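/*
 * Usage sketch (illustrative, not part of this header): every streaming
 * mapping should be checked with dma_mapping_error() before the returned
 * address is handed to hardware. 'dev', 'buf' and 'len' are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;		// mapping failed, do not use 'handle'
 */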
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, NULL);
        debug_dma_map_page(hwdev, virt_to_page(ptr),
                           (unsigned long)ptr & ~PAGE_MASK, size,
                           dir, addr, true);

        return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}
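/*
 * Usage sketch (illustrative): a streaming mapping of a driver-owned buffer
 * around a single device transfer. 'dev', 'buf' and 'len' are hypothetical;
 * the buffer must not live on the stack and must not be touched by the CPU
 * while it is mapped.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		goto err;
 *	// ... program the device with 'handle', wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */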
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        int ents;

        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
        debug_dma_map_sg(hwdev, sg, nents, ents, dir);

        return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(hwdev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
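/*
 * Usage sketch (illustrative): dma_map_sg() may coalesce entries, so the
 * device must be programmed with the *returned* count, while dma_unmap_sg()
 * is still called with the original nents. 'sglist' and 'nents' are
 * hypothetical.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		goto err;
 *	for_each_sg(sglist, s, count, i) {
 *		// hand sg_dma_address(s) / sg_dma_len(s) to the device
 *	}
 *	// ... after the transfer completes ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */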
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, dir);
        debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
                                            offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, dir);
        debug_dma_sync_single_range_for_device(hwdev, dma_handle,
                                               offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
        flush_write_buffers();
}
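/*
 * Usage sketch (illustrative): keeping a streaming mapping alive across
 * several transfers instead of remapping each time. Ownership of the buffer
 * alternates between device and CPU via the sync calls; all names are
 * hypothetical.
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	for (;;) {
 *		// ... device writes into the buffer ...
 *		dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *		process(buf);		// CPU may now read 'buf'
 *		dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *		// buffer is owned by the device again
 *	}
 *
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */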
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}
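/*
 * Usage sketch (illustrative): dma_map_page() is the page/offset variant of
 * dma_map_single(), useful when only a struct page is available (for example
 * a highmem page with no kernel virtual address). 'page' and 'len' are
 * hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		goto err;
 *	// ... device reads 'len' bytes at 'handle' ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */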
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_BIT_MASK(24))
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}
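/*
 * Worked example (illustrative): how the coherent mask picks an allocation
 * zone in the two helpers above.
 *
 *	coherent_dma_mask = DMA_BIT_MASK(24)  ->  gfp |= GFP_DMA
 *	                                          (ISA-style, below 16 MB)
 *	coherent_dma_mask = DMA_BIT_MASK(32)  ->  gfp |= GFP_DMA32 on x86-64
 *	                                          (below 4 GB)
 *	coherent_dma_mask = DMA_BIT_MASK(64)  ->  gfp left untouched
 *
 * A mask of 0 means the driver never set a coherent DMA mask, so the helpers
 * fall back to 32 bits (or 24 bits if the caller already passed GFP_DMA).
 */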
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        memory = ops->alloc_coherent(dev, size, dma_handle,
                                     dma_alloc_coherent_gfp_flags(dev, gfp));
        debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

        return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        debug_dma_free_coherent(dev, size, vaddr, bus);
        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}
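/*
 * Usage sketch (illustrative): a coherent buffer for something long-lived
 * such as a descriptor ring. No sync calls are needed for coherent memory;
 * 'ring' and RING_SIZE are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... tell the device about 'ring_dma', access 'ring' from the CPU ...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */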
#endif