dma-default.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>
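
/*
 * Translate a bus address handed out by plat_map_dma_mem() back into the
 * kernel virtual address of the buffer, so it can be fed to the cache
 * maintenance routines below.
 */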
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dma_addr);

        return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
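/*
 * The R10000 and R12000 execute loads speculatively, so cache lines can be
 * refilled while a DMA transfer is still in flight; noncoherent systems
 * built around these CPUs therefore need cache maintenance after the
 * device has finished, not only before it starts.
 */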
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_data.cputype == CPU_R10000 ||
                current_cpu_data.cputype == CPU_R12000);
}
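
/*
 * Allocate zeroed pages and set up the bus address, but leave the buffer
 * cached; callers are expected to bracket each transfer with
 * dma_cache_sync().
 */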
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
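
/*
 * As above, but on a noncoherent device the buffer is flushed from the
 * cache and the returned pointer is remapped through the uncached
 * (UNCAC_ADDR) window, so CPU accesses bypass the cache entirely.
 */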
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
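
/*
 * The free routines mirror the allocators: dma_free_coherent() must first
 * convert the uncached pointer back to its cached (CAC_ADDR) alias, since
 * that is the address __get_free_pages() originally returned.
 */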
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
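
/*
 * Pick the cache operation that matches the transfer direction: write back
 * before the device reads (DMA_TO_DEVICE), invalidate before the CPU reads
 * what the device wrote (DMA_FROM_DEVICE), and both for bidirectional
 * buffers.
 */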
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}
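
/*
 * Streaming mappings: on a noncoherent device the buffer is synced before
 * the transfer; on unmap, only the speculative CPUs (see
 * cpu_is_noncoherent_r10000()) need their caches cleaned again.
 */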
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);
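
/*
 * Map each scatterlist entry in turn.  Highmem pages have no permanent
 * kernel mapping, so page_address() may return NULL; such entries are
 * mapped without a cache sync.
 */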
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) page_address(sg->page);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr + sg->offset, sg->length, direction);

                sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
                                  sg->offset;
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);
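
/*
 * Page-based variants of dma_map_single()/dma_unmap_single().  Note that
 * dma_unmap_page() runs its cache maintenance on the address returned by
 * plat_dma_addr_to_phys(), unlike the other unmap paths, which operate on
 * kernel virtual addresses.
 */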
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                dma_cache_wback_inv(addr, size);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = plat_dma_addr_to_phys(dma_address);
                dma_cache_wback_inv(addr, size);
        }

        plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);
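
/*
 * Tear down a scatterlist mapping.  For DMA_TO_DEVICE there is nothing to
 * sync back, since the CPU will not read anything the device wrote.
 */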
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) page_address(sg->page);
                        if (addr)
                                __dma_sync(addr + sg->offset, sg->length,
                                           direction);
                }

                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);
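
/*
 * The *_for_cpu routines only need to act on the speculative
 * (R10000/R12000) noncoherent systems, where the cache may have been
 * refilled behind the device's back; the *_for_device routines must sync
 * on every noncoherent device before the hardware touches the buffer.
 */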
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
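
/*
 * Scatterlist counterparts of the single-buffer sync routines above; the
 * same cpu_is_noncoherent_r10000() / plat_device_is_coherent() split
 * applies per entry.
 */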
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg->page),
                                   sg->length, direction);
                plat_unmap_dma_mem(sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
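
/*
 * Mappings on this platform cannot fail, so there is never an error to
 * report.
 */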
int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);
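
/*
 * A DMA buffer is consistent (coherent, in Linux terms) exactly when the
 * underlying platform keeps the device coherent with the CPU caches.
 */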
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);