dma-noncoherent.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
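/*
 * Concretely, in this file a "coherent" buffer is ordinary memory that is
 * flushed from the caches once at allocation time and then accessed through
 * its uncached alias (UNCAC_ADDR() below), so CPU and device see the same
 * data without any hardware-maintained coherency.
 */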
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, int gfp)
{
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, int gfp)
{
        void *ret;

        ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
        if (ret) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = UNCAC_ADDR(ret);
        }

        return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;

        addr = CAC_ADDR(addr);
        free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);
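/*
 * Usage sketch (illustrative only): how a driver might obtain and release a
 * small descriptor ring with the coherent allocator above.  The function and
 * variable names are assumptions for illustration, not part of this API.
 */
#if 0	/* not compiled - example only */
static void *example_ring_alloc(struct device *dev, dma_addr_t *ring_dma)
{
        /* Returned pointer is uncached; *ring_dma is handed to the device. */
        return dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
}

static void example_ring_free(struct device *dev, void *ring,
        dma_addr_t ring_dma)
{
        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
}
#endif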
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}
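/*
 * The streaming mappings below follow the same pattern: perform the
 * direction-appropriate cache maintenance on the buffer and give the device
 * the physical address of the ordinary cached kernel memory.
 */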
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }

        return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr;
        addr = dma_addr + PAGE_OFFSET;

        switch (direction) {
        case DMA_TO_DEVICE:
                //dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                //dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                //dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}

EXPORT_SYMBOL(dma_unmap_single);
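/*
 * Usage sketch (illustrative only): a transmit path where the device only
 * reads the buffer.  example_start_tx()/example_finish_tx() and the hardware
 * programming step are assumptions for illustration.
 */
#if 0	/* not compiled - example only */
static dma_addr_t example_start_tx(struct device *dev, void *buf, size_t len)
{
        /* Writes the CPU caches back, then yields the bus address. */
        dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        /* ... program the device to read len bytes from bus ... */
        return bus;
}

static void example_finish_tx(struct device *dev, dma_addr_t bus, size_t len)
{
        /* Effectively a no-op here (the cache ops above are commented out),
           but the unmap is still required by the DMA API. */
        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
}
#endif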
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) page_address(sg->page);
                if (addr)
                        __dma_sync(addr + sg->offset, sg->length, direction);
                sg->dma_address = (dma_addr_t)
                        (page_to_phys(sg->page) + sg->offset);
        }

        return nents;
}

EXPORT_SYMBOL(dma_map_sg);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = (unsigned long) page_address(page) + offset;
        dma_cache_wback_inv(addr, size);

        return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (direction != DMA_TO_DEVICE) {
                unsigned long addr;

                addr = dma_address + PAGE_OFFSET;
                dma_cache_wback_inv(addr, size);
        }
}

EXPORT_SYMBOL(dma_unmap_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        if (direction == DMA_TO_DEVICE)
                return;

        for (i = 0; i < nhwentries; i++, sg++) {
                addr = (unsigned long) page_address(sg->page);
                if (!addr)
                        continue;
                dma_cache_wback_inv(addr + sg->offset, sg->length);
        }
}

EXPORT_SYMBOL(dma_unmap_sg);
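/*
 * Usage sketch (illustrative only): the scatterlist path for a receive,
 * where the device writes into memory.  Names are assumptions for
 * illustration.
 */
#if 0	/* not compiled - example only */
static void example_sg_receive(struct device *dev, struct scatterlist *sgl,
        int nents)
{
        int i, mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        struct scatterlist *sg = sgl;

        for (i = 0; i < mapped; i++, sg++) {
                /* ... queue sg->dma_address / sg->length to the device ... */
        }

        /* ... wait for the transfer to complete ... */

        /* Performs the cache maintenance needed before the CPU reads the data. */
        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
}
#endif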
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + offset + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        unsigned long addr;

        BUG_ON(direction == DMA_NONE);

        addr = dma_handle + offset + PAGE_OFFSET;
        __dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++)
                __dma_sync((unsigned long)page_address(sg->page),
                           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++)
                __dma_sync((unsigned long)page_address(sg->page),
                           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);
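/*
 * Usage sketch (illustrative only): a driver that keeps one streaming buffer
 * mapped and reuses it must pass ownership back and forth with the sync
 * calls above.  Names are assumptions for illustration.
 */
#if 0	/* not compiled - example only */
static void example_poll_rx(struct device *dev, dma_addr_t bus,
        void *buf, size_t len)
{
        /* The device may have written new data: invalidate before reading. */
        dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);

        /* ... inspect buf ... */

        /* Hand the buffer back to the device for the next transfer. */
        dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
}
#endif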
int dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA..
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

EXPORT_SYMBOL(dma_supported);
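/*
 * Illustration: with the check above a 24-bit mask such as 0x00ffffff is
 * accepted, while a narrower (hypothetical) 0x0000ffff mask is rejected,
 * since even GFP_DMA memory cannot be guaranteed to fall inside it.
 */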
int dma_is_consistent(dma_addr_t dma_addr)
{
        return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
        if (direction == DMA_NONE)
                return;

        dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);
/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>

dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
        struct page *page, unsigned long offset, int direction)
{
        return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
        dma64_addr_t dma_addr)
{
        return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
        dma64_addr_t dma_addr)
{
        return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
        dma64_addr_t dma_addr, size_t len, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);

        dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
        dma64_addr_t dma_addr, size_t len, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);

        dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */