/* dma-mapping.h */
#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
        return pfn_to_page(__bus_to_pfn(addr));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return __arch_page_to_dma(dev, page);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_page(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions. These allocate/free a region of
 * uncached, unbuffered memory space mapped for use with DMA devices.
 * This is the "generic" version; the PCI-specific version is in pci.h.
 *
 * Note: Drivers should NOT use these functions directly, as doing so will
 * break platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*).
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
                                 size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        if (mask < ISA_DMA_THRESHOLD)
                return 0;
        return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
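
/*
 * Usage sketch (editorial addition, not part of the original header): a
 * probe routine for a hypothetical device that can only drive the low
 * 24 address bits might reject the device when the mask is unusable.
 *
 *      static int foo_probe(struct device *dev)
 *      {
 *              int ret = dma_set_mask(dev, 0x00ffffff);
 *
 *              if (ret)
 *                      dev_err(dev, "no usable DMA configuration\n");
 *              return ret;
 *      }
 */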

static inline int dma_get_cache_alignment(void)
{
        return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
        return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call executes.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
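
/*
 * Usage sketch (editorial addition): allocating and freeing a coherent
 * buffer for a hypothetical descriptor ring. The names are illustrative.
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ... program ring_dma into the device; access the ring via "ring" ...
 *      dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */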

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
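
/*
 * Usage sketch (editorial addition): exporting a coherent buffer from a
 * driver's mmap() file operation. "struct foo_dev" and its fields are
 * hypothetical.
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_dev *foo = file->private_data;
 *
 *              return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *                                       foo->dma_handle, foo->size);
 *      }
 */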

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long);
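
/*
 * Usage sketch (editorial addition): platform code registering a device
 * whose bus can only reach a limited DMA window, with 2 KiB and 64 KiB
 * bounce-buffer pools. The pool sizes shown are illustrative.
 *
 *      if (dmabounce_register_dev(dev, 2048, 65536))
 *              printk(KERN_ERR "failed to register device with dmabounce\n");
 */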

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
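
/*
 * Example platform implementation (editorial sketch): bounce any buffer
 * that extends beyond a 64MB inbound window starting at PHYS_OFFSET.
 * The window size is illustrative only.
 *
 *      int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *      {
 *              return (addr + size) > (PHYS_OFFSET + SZ_64M);
 *      }
 */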

/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
                enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint(cpu_addr, size, dir);

        return virt_to_dma(dev, cpu_addr);
}
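
/*
 * Usage sketch (editorial addition): a streaming mapping of a kmalloc'd
 * buffer for a transfer to the device, unmapped once the DMA completes.
 *
 *      dma_addr_t dma;
 *
 *      dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      ... start the transfer and wait for it to complete ...
 *      dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */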

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint_page(page, offset, size, dir);

        return page_to_dma(dev, page) + offset;
}
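
/*
 * Usage sketch (editorial addition): mapping part of a page, such as one
 * fragment of a larger buffer, for device reads. Names are illustrative.
 *
 *      dma = dma_map_page(dev, page, offset, frag_len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      ... start the transfer and wait for it to complete ...
 *      dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
 */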

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        /* nothing to do */
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        /* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so. At the point where you
 * next give the DMA address back to the card, you must first perform a
 * dma_sync_single_for_device(); the device then owns the buffer again.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        if (!arch_is_coherent())
                dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
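
/*
 * Usage sketch (editorial addition): letting the CPU inspect a long-lived
 * DMA_FROM_DEVICE mapping between transfers without unmapping it.
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      ... the CPU may now safely read the buffer contents ...
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      ... the device owns the buffer again for the next transfer ...
 */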

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
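
/*
 * Usage sketch (editorial addition): mapping a scatterlist and walking the
 * resulting DMA addresses with the helpers from <linux/scatterlist.h>.
 * program_descriptor() stands in for whatever the driver does with each
 * DMA segment.
 *
 *      struct scatterlist *s;
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *      for_each_sg(sg, s, count, i)
 *              program_descriptor(dev, sg_dma_address(s), sg_dma_len(s));
 *      ... run the transfer ...
 *      dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */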

#endif /* __KERNEL__ */
#endif