dma-ip32.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/ip32/crime.h>

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

/*
 * A few notes:
 * 1. CPU sees memory as two chunks: 0-256M @ 0x0, and the rest @ 0x40000000 + 256M
 * 2. PCI sees memory as one big chunk @ 0x0 (or we could use 0x40000000 for native-endian)
 * 3. All other devices see memory as one big chunk at 0x40000000
 * 4. Non-PCI devices will pass NULL as struct device *
 * Thus we translate differently, depending on device.
 */
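
/*
 * Illustrative sketch of the translation rule described above.  The concrete
 * numbers are examples only and assume CRIME_HI_MEM_BASE is the 0x40000000
 * window mentioned in note 3:
 *
 *	phys 0x00800000  ->  PCI bus address      0x00800000  (dev != NULL)
 *	phys 0x00800000  ->  non-PCI bus address  0x40800000  (dev == NULL)
 *
 * RAM_OFFSET_MASK (below) strips the CPU-side high-memory alias before the
 * per-device base is applied.
 */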
#define RAM_OFFSET_MASK	0x3fffffff

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, int gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		unsigned long addr = virt_to_phys(ret) & RAM_OFFSET_MASK;

		memset(ret, 0, size);
		if (dev == NULL)
			addr += CRIME_HI_MEM_BASE;
		*dma_handle = addr;
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, int gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
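
/*
 * Driver-side usage sketch for the coherent allocator above.  This is an
 * illustration only; "my_dev" and the 4096-byte length are hypothetical:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(my_dev, 4096, &bus, GFP_KERNEL);
 *
 *	if (cpu) {
 *		... program the device with "bus", access the buffer via "cpu" ...
 *		dma_free_coherent(my_dev, 4096, cpu, bus);
 *	}
 */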
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}

	addr = virt_to_phys(ptr) & RAM_OFFSET_MASK;
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;

	return (dma_addr_t)addr;
}

EXPORT_SYMBOL(dma_map_single);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		break;

	case DMA_FROM_DEVICE:
		break;

	case DMA_BIDIRECTIONAL:
		break;

	default:
		BUG();
	}
}

EXPORT_SYMBOL(dma_unmap_single);
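
/*
 * Streaming-DMA usage sketch for dma_map_single()/dma_unmap_single() above.
 * Illustration only; "my_dev", "buf" and "len" are hypothetical:
 *
 *	dma_addr_t bus = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *
 *	... hand "bus" to the device and wait for the transfer to complete ...
 *
 *	dma_unmap_single(my_dev, bus, len, DMA_TO_DEVICE);
 */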
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page) + sg->offset;
		if (addr)
			__dma_sync(addr, sg->length, direction);
		addr = __pa(addr) & RAM_OFFSET_MASK;
		if (dev == NULL)
			addr += CRIME_HI_MEM_BASE;
		sg->dma_address = (dma_addr_t)addr;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);
	addr = __pa(addr) & RAM_OFFSET_MASK;
	if (dev == NULL)
		addr += CRIME_HI_MEM_BASE;

	return (dma_addr_t)addr;
}

EXPORT_SYMBOL(dma_map_page);
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		dma_address &= RAM_OFFSET_MASK;
		addr = dma_address + PAGE_OFFSET;
		if (dma_address >= 256*1024*1024)
			addr += CRIME_HI_MEM_BASE;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (!addr)
			continue;
		dma_cache_wback_inv(addr + sg->offset, sg->length);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + offset + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	dma_handle &= RAM_OFFSET_MASK;
	addr = dma_handle + offset + PAGE_OFFSET;
	if (dma_handle >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long) vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);