dma-mapping.h

#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#define DMA_ERROR_CODE	(~0)

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif
/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does perform speculative prefetches, which means we clean
 * caches before transfers and delay cache invalidation until transfer
 * completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_cpu_to_dev(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_dev_to_cpu(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_dev_to_cpu(page, off, size, dir);
}
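
/*
 * Example (illustrative sketch, not taken from this header): the
 * ownership transitions described above, as seen from a driver using
 * the public streaming API declared later in this file.  my_dev, buf
 * and BUF_LEN are hypothetical names.
 *
 *        dma = dma_map_single(my_dev, buf, BUF_LEN, DMA_FROM_DEVICE);
 *                => device owns the buffer; the CPU must not touch it
 *        dma_sync_single_for_cpu(my_dev, dma, BUF_LEN, DMA_FROM_DEVICE);
 *                => CPU owns the buffer again and may read the DMA'd data
 *        dma_sync_single_for_device(my_dev, dma, BUF_LEN, DMA_FROM_DEVICE);
 *                => ownership handed back to the device for another transfer
 *        dma_unmap_single(my_dev, dma, BUF_LEN, DMA_FROM_DEVICE);
 *                => final transfer complete; CPU owns the buffer
 */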
extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == DMA_ERROR_CODE;
}
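
/*
 * Example (illustrative sketch; my_dev, buf and len are hypothetical):
 * a streaming mapping is normally checked with dma_mapping_error()
 * before the address is handed to the device.
 *
 *        dma_addr_t dma = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *        if (dma_mapping_error(my_dev, dma))
 *                return -ENOMEM;        (mapping failed, do not use dma)
 */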
/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
        void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
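
/*
 * Example (illustrative sketch; my_dev and RING_BYTES are hypothetical):
 * a typical coherent allocation for a descriptor ring, freed again with
 * the same size/address pair.
 *
 *        dma_addr_t ring_dma;
 *        void *ring = dma_alloc_coherent(my_dev, RING_BYTES, &ring_dma,
 *                                        GFP_KERNEL);
 *        if (!ring)
 *                return -ENOMEM;
 *        (program ring_dma into the device; CPU accesses go through "ring")
 *        dma_free_coherent(my_dev, RING_BYTES, ring, ring_dma);
 */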
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
        void *, dma_addr_t, size_t);
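
/*
 * Example (illustrative sketch; my_dev, my_cpu_addr, my_handle and
 * my_size are hypothetical): exporting a coherent buffer from a
 * driver's mmap file operation.
 *
 *        static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *        {
 *                return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *                                         my_handle, my_size);
 *        }
 */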
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
        gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
        void *, dma_addr_t, size_t);
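
/*
 * Example (illustrative sketch; my_dev and FB_BYTES are hypothetical):
 * write-combined memory suits CPU-written, device-read buffers such as
 * framebuffers, where buffered writes are acceptable.
 *
 *        dma_addr_t fb_dma;
 *        void *fb = dma_alloc_writecombine(my_dev, FB_BYTES, &fb_dma,
 *                                          GFP_KERNEL);
 *        ...
 *        dma_free_writecombine(my_dev, FB_BYTES, fb, fb_dma);
 */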
/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB. It must be called before the
 * memory allocator is initialised, i.e. before any core_initcall.
 */
extern void __init init_consistent_dma_size(unsigned long size);
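
/*
 * Example (illustrative sketch; my_board_reserve is hypothetical, and an
 * early machine-init callback is assumed to be one suitable place to
 * call this from, well before any core_initcall):
 *
 *        static void __init my_board_reserve(void)
 *        {
 *                init_consistent_dma_size(SZ_8M);  (raise from the 2MB default)
 *        }
 */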
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
        unsigned long, int (*)(struct device *, dma_addr_t, size_t));
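
/*
 * Example (illustrative sketch; my_needs_bounce, the pool sizes and the
 * 64MB window are hypothetical): platform code registers the device
 * together with a callback that says which addresses must be bounced.
 *
 *        static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *                                   size_t size)
 *        {
 *                (bounce anything that ends above a 64MB inbound window)
 *                return (addr + size) > SZ_64M;
 *        }
 *
 *        dmabounce_register_dev(dev, SZ_2K, SZ_64K, my_needs_bounce);
 */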
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);
/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
        unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
        enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
        size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
        size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        __dma_page_cpu_to_dev(page, offset, size, dir);
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
        size_t size, enum dma_data_direction dir)
{
        __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
                handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
        size_t size, enum dma_data_direction dir)
{
        unsigned long offset;
        struct page *page;
        dma_addr_t addr;

        BUG_ON(!virt_addr_valid(cpu_addr));
        BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
        BUG_ON(!valid_dma_direction(dir));

        page = virt_to_page(cpu_addr);
        offset = (unsigned long)cpu_addr & ~PAGE_MASK;
        addr = __dma_map_page(dev, page, offset, size, dir);
        debug_dma_map_page(dev, page, offset, size, dir, addr, true);

        return addr;
}
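
/*
 * Example (illustrative sketch; my_dev, buf and LEN are hypothetical):
 * mapping a kmalloc'd buffer for a device-to-memory transfer, then
 * unmapping it once the transfer has completed.
 *
 *        dma_addr_t dma = dma_map_single(my_dev, buf, LEN, DMA_FROM_DEVICE);
 *        if (dma_mapping_error(my_dev, dma))
 *                return -ENOMEM;
 *        (start the transfer, wait for the completion interrupt)
 *        dma_unmap_single(my_dev, dma, LEN, DMA_FROM_DEVICE);
 *        (the CPU may now read the data the device wrote into buf)
 */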
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        addr = __dma_map_page(dev, page, offset, size, dir);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}
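
/*
 * Example (illustrative sketch; my_dev and pg are hypothetical):
 * mapping a whole page obtained from the page allocator for a
 * memory-to-device transfer.
 *
 *        struct page *pg = alloc_page(GFP_KERNEL);
 *        dma_addr_t dma = dma_map_page(my_dev, pg, 0, PAGE_SIZE,
 *                                      DMA_TO_DEVICE);
 *        ...
 *        dma_unmap_page(my_dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */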
/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
        size_t size, enum dma_data_direction dir)
{
        debug_dma_unmap_page(dev, handle, size, dir, true);
        __dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
        size_t size, enum dma_data_direction dir)
{
        debug_dma_unmap_page(dev, handle, size, dir, false);
        __dma_unmap_page(dev, handle, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
        dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        debug_dma_sync_single_for_cpu(dev, handle, size, dir);

        if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
                return;

        __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
        dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        debug_dma_sync_single_for_device(dev, handle, size, dir);

        if (!dmabounce_sync_for_device(dev, handle, size, dir))
                return;

        __dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
}
/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so. At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
        dma_addr_t handle, unsigned long offset, size_t size,
        enum dma_data_direction dir)
{
        dma_sync_single_for_cpu(dev, handle + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
        dma_addr_t handle, unsigned long offset, size_t size,
        enum dma_data_direction dir)
{
        dma_sync_single_for_device(dev, handle + offset, size, dir);
}
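
/*
 * Example (illustrative sketch; my_dev, dma and LEN are hypothetical):
 * reusing one long-lived mapping across several transfers by handing
 * ownership back and forth instead of remapping each time.
 *
 *        dma_sync_single_for_cpu(my_dev, dma, LEN, DMA_FROM_DEVICE);
 *        (inspect or copy the received data with the CPU)
 *        dma_sync_single_for_device(my_dev, dma, LEN, DMA_FROM_DEVICE);
 *        (give the address back to the device for the next transfer)
 */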
/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
        enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
        enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
        enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
        enum dma_data_direction);
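
/*
 * Example (illustrative sketch; my_dev, sgl and nents are hypothetical,
 * and the scatterlist is assumed to have been set up already with
 * sg_init_table()/sg_set_page()). Note that dma_unmap_sg() takes the
 * original nents, not the possibly smaller count returned by
 * dma_map_sg().
 *
 *        int count = dma_map_sg(my_dev, sgl, nents, DMA_TO_DEVICE);
 *        if (count == 0)
 *                return -ENOMEM;
 *        (program "count" entries into the device, reading each element
 *         with sg_dma_address() and sg_dma_len())
 *        dma_unmap_sg(my_dev, sgl, nents, DMA_TO_DEVICE);
 */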
#endif /* __KERNEL__ */
#endif