dma-mapping.h

#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	else
		return __generic_dma_ops(dev);
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}
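/*
 * Usage sketch (hypothetical platform code, not part of this header):
 * bus setup code can install alternative ops for a device before any
 * driver maps buffers, e.g. with an assumed my_iommu_dma_ops:
 *
 *	static void my_bus_setup(struct device *dev)
 *	{
 *		set_dma_ops(dev, &my_iommu_dma_ops);
 *	}
 *
 * Later get_dma_ops(dev) calls then dispatch to those ops instead of
 * the arm_dma_ops default (unless running as the Xen initial domain).
 */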
#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
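/*
 * Usage sketch (hypothetical driver probe): negotiate the addressing
 * capability before creating any mapping:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * A non-zero return means the mask is unsupported and the device must
 * not attempt DMA at that width; pdev is an assumed platform device.
 */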
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
/* Self-define so generic code can detect that the override is present. */
#define dma_max_pfn(dev) dma_max_pfn(dev)
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;
	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;
	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}
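/*
 * Worked example (assuming the default __pfn_to_bus translation): for
 * paddr 0x80001234 with 4KiB pages, offset = paddr & ~PAGE_MASK = 0x234
 * and __phys_to_pfn(paddr) = 0x80001, so phys_to_dma() returns
 * pfn_to_dma(dev, 0x80001) plus 0x234. dma_to_phys() inverts this by
 * splitting the bus address the same way.
 */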
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit, mask;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	if ((addr | (addr + size - 1)) & ~mask)
		return 0;

	return 1;
}
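/*
 * Worked example: with *dev->dma_mask == DMA_BIT_MASK(32), limit =
 * (mask + 1) & ~mask = 0x100000000, so any size above 4GiB is rejected.
 * The second test ORs the first and last byte addresses of the buffer;
 * a bit set above the mask means part of the buffer lies outside the
 * device's window, e.g. addr 0xfffff000 with size 0x2000 fails because
 * addr + size - 1 = 0x100000fff does not fit in 32 bits.
 */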
static inline void dma_mark_clean(void *addr, size_t size) { }

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return dma_addr == DMA_ERROR_CODE;
}
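/*
 * Usage sketch (hypothetical driver; buf/len are assumptions): every
 * streaming mapping must be checked before the address reaches the
 * hardware:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 * dma_map_single() is provided by dma-mapping-common.h, included above.
 */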
/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}
extern int dma_supported(struct device *dev, u64 mask);
extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;
	BUG_ON(!ops);

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
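/*
 * Usage sketch (hypothetical driver): a minimal coherent allocation for
 * a device that has already set its DMA mask:
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *
 * The CPU accesses the buffer through cpu while the device is
 * programmed with handle; both name the same memory.
 */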
/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
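/*
 * Usage sketch (continuing the allocation example above, typically in
 * the driver's remove path):
 *
 *	dma_free_coherent(dev, SZ_4K, cpu, handle);
 *
 * The size and both addresses must match the original allocation, and
 * neither cpu nor handle may be used afterwards.
 */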
/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);
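/*
 * Usage sketch (hypothetical driver mmap handler): drivers do not call
 * arm_dma_mmap() directly but go through the generic dma_mmap_coherent()
 * wrapper (assumed to come from dma-mapping-common.h in this tree):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(dev, vma, cpu, handle, SZ_4K);
 *	}
 *
 * where dev/cpu/handle are the values from the earlier allocation.
 */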
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
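/*
 * Usage sketch: write-combined buffers suit memory the CPU mostly
 * writes sequentially, e.g. a hypothetical framebuffer of fb_size
 * bytes:
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_writecombine(dev, fb_size, &fb_dma,
 *					  GFP_KERNEL);
 *
 * with dma_free_writecombine(dev, fb_size, fb, fb_dma) on teardown.
 * Reads from write-combined memory are uncached and correspondingly
 * slow.
 */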
/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);
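/*
 * Usage sketch (hypothetical board code; the 1MiB figure is an
 * assumption): since the call must happen before postcore_initcall,
 * the machine descriptor's early init hook is a suitable place:
 *
 *	static void __init my_board_init_early(void)
 *	{
 *		init_dma_coherent_pool_size(SZ_1M);
 *	}
 */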
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
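/*
 * Usage sketch (hypothetical platform code; the 64MB window and the
 * 4KiB/64KiB pool sizes are assumptions): the callback returns non-zero
 * when an address/size pair falls outside the device's DMA window:
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return addr + size > SZ_64M;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, SZ_4K, SZ_64K, my_needs_bounce);
 *
 * Pair with dmabounce_unregister_dev() when the device goes away.
 */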
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);
/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs);
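/*
 * Usage sketch (hypothetical driver; sgl/nents name an initialised
 * scatterlist): drivers use the generic dma_map_sg()/dma_unmap_sg()
 * wrappers rather than the arm_* implementations directly:
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	(program the count coalesced segments into the device)
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * Note the unmap uses the original nents, not the returned count.
 */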
#endif /* __KERNEL__ */
#endif