dma-mapping.h

#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#define DMA_ERROR_CODE	(~0)

extern struct dma_map_ops arm_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}
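/*
 * Illustrative sketch (not part of this header): bus or platform code can
 * install device-specific DMA operations before a device is used; every
 * generic dma_* call is then dispatched through get_dma_ops().  The names
 * my_iommu_dma_ops and my_platform_setup() below are hypothetical.
 *
 *	extern struct dma_map_ops my_iommu_dma_ops;
 *
 *	static void my_platform_setup(struct platform_device *pdev)
 *	{
 *		set_dma_ops(&pdev->dev, &my_iommu_dma_ops);
 *	}
 */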
#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
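/*
 * Illustrative sketch (not part of this header): a driver typically
 * negotiates its addressing capability at probe time.  my_probe() is
 * hypothetical.
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *		...
 *	}
 */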
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}
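/*
 * Illustrative sketch (not part of this header): from a driver's point of
 * view these ownership transitions are driven through the generic
 * streaming API.  Between dma_map_single()/dma_sync_single_for_device()
 * and dma_sync_single_for_cpu()/dma_unmap_single() the buffer belongs to
 * the device and must not be touched by the CPU.  my_buf and my_len are
 * hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, my_buf, my_len, DMA_FROM_DEVICE);
 *	...device fills the buffer...
 *	dma_sync_single_for_cpu(dev, dma, my_len, DMA_FROM_DEVICE);
 *	...CPU may now read my_buf...
 *	dma_sync_single_for_device(dev, dma, my_len, DMA_FROM_DEVICE);
 *	...device may DMA into the buffer again...
 *	dma_unmap_single(dev, dma, my_len, DMA_FROM_DEVICE);
 */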
extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}
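/*
 * Illustrative sketch (not part of this header): every streaming mapping
 * should be checked before the returned handle is handed to hardware.
 * The skb variable is hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, skb->data, skb->len,
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */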
/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
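/*
 * Illustrative sketch (not part of this header): a typical coherent
 * allocation for a descriptor ring.  RING_SIZE is hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...program ring_dma into the device, access ring from the CPU...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */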
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
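/*
 * Illustrative sketch (not part of this header): a character device's
 * mmap() handler can hand a previously allocated coherent buffer to user
 * space.  my_dev, my_cpu_addr, my_handle and my_size are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *					 my_handle, my_size);
 *	}
 */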
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
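/*
 * Illustrative sketch (not part of this header): write-combining memory is
 * commonly used for frame buffers, where the CPU mostly streams writes.
 * fb_size is hypothetical.
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, fb_size, fb, fb_dma);
 */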
/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB.  It must be called before the
 * memory allocator is initialised, i.e. before any core_initcall.
 */
extern void __init init_consistent_dma_size(unsigned long size);
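/*
 * Illustrative sketch (not part of this header): a machine description can
 * enlarge the consistent region from its early init hook.  The 4MB figure
 * and my_mach_init_early() are hypothetical.
 *
 *	static void __init my_mach_init_early(void)
 *	{
 *		init_consistent_dma_size(SZ_4M);
 *	}
 */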
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
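/*
 * Illustrative sketch (not part of this header): platform code registers a
 * device together with a predicate that reports when a buffer falls outside
 * the reachable DMA window.  MY_DMA_LIMIT and my_needs_bounce() are
 * hypothetical, and the pool sizes are arbitrary example values.
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size) > MY_DMA_LIMIT;
 *	}
 *
 *	dmabounce_register_dev(dev, SZ_1K, SZ_4K, my_needs_bounce);
 */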
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);
/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
	size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */
/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
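/*
 * Illustrative sketch (not part of this header): drivers do not call the
 * arm_dma_* entries directly; they reach them through the generic
 * scatter-gather API.  sgl and nents are hypothetical.
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	...program the device using sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */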
#endif /* __KERNEL__ */
#endif