dma-default.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}
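/*
 * Illustrative sketch (not part of the original file): massage_gfp_flags()
 * steers an allocation into ZONE_DMA or ZONE_DMA32 purely from
 * dev->coherent_dma_mask, e.g. a mask below DMA_BIT_MASK(32) picks up
 * __GFP_DMA32 when that zone is configured. The function and "example_dev"
 * names below are hypothetical, used only for the example.
 */
#if 0	/* example only, not compiled */
static void example_limit_coherent_mask(struct device *example_dev)
{
	/* Tell the DMA layer this device can only address 30 bits ... */
	example_dev->coherent_dma_mask = DMA_BIT_MASK(30);
	/* ... so a later dma_alloc_coherent() will ask for __GFP_DMA32. */
}
#endif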
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
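/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * dma_alloc_coherent()/dma_free_coherent(). On a non-coherent platform the
 * pointer handed back is the uncached (UNCAC_ADDR) alias set up above, so
 * CPU and device see the same data without explicit cache maintenance.
 * The function and variable names are hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_coherent_buffer(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	/* One page, zeroed by dma_alloc_coherent() itself. */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program the device with "handle", touch "cpu_addr" from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, handle);
	return 0;
}
#endif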
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
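/*
 * Note on __dma_sync() above (added commentary, not in the original file):
 * DMA_TO_DEVICE only needs a writeback so the device reads what the CPU
 * last wrote; DMA_FROM_DEVICE only needs an invalidate so the CPU does not
 * read stale cache lines over data the device wrote; DMA_BIDIRECTIONAL
 * needs both.
 */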
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);
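/*
 * Illustrative sketch (not part of the original file): the streaming API
 * above in use. dma_map_single() does the cache maintenance for
 * non-coherent devices before handing out the bus address;
 * dma_unmap_single() re-syncs only on the speculating R10000/R12000.
 * All names below are hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus;

	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, bus))
		return -EIO;

	/* ... hand "bus" to the device and wait for the transfer to finish ... */

	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
	return 0;
}
#endif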
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);
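/*
 * Illustrative sketch (not part of the original file): mapping a minimal
 * scatterlist with the routines above. The function and variable names are
 * assumptions made only for the example.
 */
#if 0	/* example only, not compiled */
static int example_map_one_sg(struct device *dev, void *buf, size_t len)
{
	struct scatterlist sg;
	int nents;

	sg_init_one(&sg, buf, len);

	nents = dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE);
	if (nents == 0)
		return -EIO;

	/* ... the device fills the buffer described by sg.dma_address ... */

	dma_unmap_sg(dev, &sg, 1, DMA_FROM_DEVICE);
	return 0;
}
#endif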
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);
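/*
 * Illustrative sketch (not part of the original file): if the CPU must look
 * at a streaming buffer while it stays mapped, the access is bracketed with
 * the two sync calls above. Names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_peek_rx_buffer(struct device *dev, dma_addr_t bus,
	void *cpu_addr, size_t len)
{
	/* Give the buffer to the CPU: drop any stale cache lines. */
	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);

	/* ... inspect cpu_addr ... */

	/* Hand it back to the device before the next DMA transfer. */
	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
}
#endif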
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);
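/*
 * Illustrative sketch (not part of the original file): a driver probe path
 * might use dma_supported() above (typically via dma_set_mask()) to check
 * whether the platform can reach a 32-bit address mask. Names are
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_check_dma_mask(struct device *dev)
{
	if (!dma_supported(dev, DMA_BIT_MASK(32)))
		return -EIO;	/* platform cannot do 32-bit DMA for this device */
	return 0;
}
#endif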
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);