dma-default.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dma_addr);

        return (unsigned long)phys_to_virt(addr);
}
/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_type() == CPU_R10000 ||
                current_cpu_type() == CPU_R12000);
}
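
/*
 * Pick the GFP zone for a coherent allocation from the device's
 * coherent_dma_mask: fall back to ZONE_DMA when there is no device or the
 * mask is below 24 bits, and to ZONE_DMA32 for masks below 32 bits.
 * Caller-supplied zone specifiers are ignored, and __GFP_NORETRY is added
 * so the allocation never invokes the OOM killer.
 */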
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
        if (dev == NULL)
                gfp |= __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
                gfp |= __GFP_DMA;
        else
#endif
#ifdef CONFIG_ZONE_DMA32
             if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
                gfp |= __GFP_DMA32;
        else
#endif
                ;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp;
}
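
/*
 * Allocate zeroed pages for DMA and return their kernel virtual address,
 * with the bus address in *dma_handle.  The mapping is left cached; callers
 * of the noncoherent API are expected to use dma_cache_sync() around device
 * accesses.
 */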
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
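
/*
 * As above, but for devices that are not hardware-coherent the freshly
 * allocated buffer is written back and invalidated in the cache and the
 * returned pointer is remapped to an uncached address via UNCAC_ADDR().
 */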
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        plat_unmap_dma_mem(dev, dma_handle);
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);
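
/*
 * Undo dma_alloc_coherent(): convert an uncached pointer back to its cached
 * equivalent before handing the pages back to the page allocator.
 */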
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        plat_unmap_dma_mem(dev, dma_handle);

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
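
/*
 * Perform the cache maintenance needed for a DMA transfer in the given
 * direction: write back dirty lines before the device reads memory,
 * invalidate cached lines when the device will write the memory, and do
 * both for bidirectional transfers.
 */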
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}
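
/*
 * Map a kernel virtual buffer for streaming DMA.  Non-coherent devices get
 * the necessary cache maintenance up front; the returned handle is the
 * platform's bus address for the buffer.
 */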
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);
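
/*
 * Tear down a dma_map_single() mapping.  Non-coherent R10000/R12000 systems
 * need another cache sync here, since those CPUs can speculatively refill
 * cache lines while the DMA transfer is still in flight.
 */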
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dev, dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);
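
/*
 * Map each entry of a scatterlist for streaming DMA, syncing the CPU caches
 * for non-coherent devices and recording the bus address in
 * sg->dma_address.  Returns the number of entries mapped.
 */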
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
                                                   (void *)addr, sg->length);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);
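
/*
 * Page-based variant of dma_map_single(): sync the cache for the mapped
 * region when the device is not coherent, then return the bus address of
 * the page plus the offset.
 */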
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                __dma_sync(addr, size, direction);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);
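
/*
 * Unmap a scatterlist.  For non-coherent devices the caches are synced
 * again unless the transfer was CPU-to-device only, in which case the CPU's
 * copy is already up to date.
 */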
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) sg_virt(sg);
                        if (addr)
                                __dma_sync(addr, sg->length, direction);
                }
                plat_unmap_dma_mem(dev, sg->dma_address);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);
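
/*
 * The dma_sync_*_for_cpu() variants below only need cache work on
 * non-coherent R10000/R12000 systems; the *_for_device() variants must sync
 * on every non-coherent device and additionally give the platform a hook
 * via plat_extra_sync_for_device().
 */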
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
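
/*
 * The remaining helpers simply defer to the platform: whether a handle
 * failed to map, whether a given DMA mask can be supported, and whether
 * memory for this device is hardware-coherent.
 */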
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
        return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);
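
/*
 * Explicit cache maintenance for buffers obtained from
 * dma_alloc_noncoherent(): a no-op on coherent hardware, a full
 * __dma_sync() otherwise.
 */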
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev))
                __dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);
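
/*
 * Illustrative use of the streaming API above from a driver.  This is a
 * sketch only: "dev", "buf" and "len" stand in for a real device, buffer
 * and length, and error handling is reduced to the mapping check.
 *
 *        dma_addr_t handle;
 *
 *        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *        if (dma_mapping_error(dev, handle))
 *                return -ENOMEM;
 *        ... program the device with "handle" and run the transfer ...
 *        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */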