pci-dma.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory, which for us just
 * means uncached.
 */
void *dma_alloc_coherent(struct device *dev,
			 size_t size,
			 dma_addr_t *dma_handle,
			 gfp_t gfp)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	/*
	 * By forcing NUMA node 0 for 32-bit masks we ensure that the
	 * high 32 bits of the resulting PA will be zero.  If the mask
	 * size is, e.g., 24, we may still not be able to guarantee a
	 * suitable memory address, in which case we will return NULL.
	 * But such devices are uncommon.
	 */
	if (dma_mask <= DMA_BIT_MASK(32))
		node = 0;

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);
	if (addr + size > dma_mask) {
		/*
		 * Free using the kernel virtual address:
		 * homecache_free_pages() expects a VA, as in
		 * dma_free_coherent() below, not the physical address.
		 */
		homecache_free_pages((unsigned long)page_address(pg), order);
		return NULL;
	}

	*dma_handle = addr;
	return page_address(pg);
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Free memory that was allocated with dma_alloc_coherent.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
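
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a driver might obtain an uncached buffer and its bus address roughly
 * as follows; "foo_dev" and BUF_SIZE are hypothetical names.
 *
 *	dma_addr_t bus_addr;
 *	void *buf = dma_alloc_coherent(&foo_dev->dev, BUF_SIZE,
 *				       &bus_addr, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... program the device with bus_addr, access buf from the CPU ...
 *	dma_free_coherent(&foo_dev->dev, BUF_SIZE, buf, bus_addr);
 */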

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping, we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory.  We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */

/* Flush a PA range from cache page by page. */
static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size)
{
	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
	size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1));

	while ((ssize_t)size > 0) {
		/* Flush the page. */
		homecache_flush_cache(page++, 0);

		/* Figure out if we need to continue on the next page. */
		size -= bytesleft;
		bytesleft = PAGE_SIZE;
	}
}

/*
 * dma_map_single can be passed any memory address, and there appear
 * to be no alignment constraints.
 *
 * There is a chance that the start of the buffer will share a cache
 * line with some other data that has been touched in the meantime.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	dma_addr_t dma_addr = __pa(ptr);

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);

	__dma_map_pa_range(dma_addr, size);

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_single);
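
/*
 * Usage sketch (illustrative only): for streaming DMA, a driver maps a
 * kernel buffer before handing it to the device and unmaps it once the
 * transfer completes; "foo_dev", "buf" and "len" are hypothetical names.
 *
 *	dma_addr_t dma = dma_map_single(&foo_dev->dev, buf, len,
 *					DMA_TO_DEVICE);
 *	... start the device transfer using "dma" ...
 *	dma_unmap_single(&foo_dev->dev, dma, len, DMA_TO_DEVICE);
 *
 * For DMA_FROM_DEVICE, the unmap must happen before the CPU reads the
 * data, per the comment above the map routines.
 */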

int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_map_pa_range(sg->dma_address, sg->length);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_sg);
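
/*
 * Usage sketch (illustrative only): scatter-gather mappings work the
 * same way, with one cache flush per entry; "foo_dev", "sgl" and
 * "count" are hypothetical names.
 *
 *	int mapped = dma_map_sg(&foo_dev->dev, sgl, count, DMA_TO_DEVICE);
 *	... program each sg_dma_address(sg) / sg_dma_len(sg) pair ...
 *	dma_unmap_sg(&foo_dev->dev, sgl, mapped, DMA_TO_DEVICE);
 */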

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	BUG_ON(offset + size > PAGE_SIZE);

	homecache_flush_cache(page, 0);

	return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_page);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
	unsigned long start = PFN_DOWN(dma_handle);
	unsigned long end = PFN_DOWN(dma_handle + size - 1);
	unsigned long i;

	BUG_ON(!valid_dma_direction(direction));
	for (i = start; i <= end; ++i)
		homecache_flush_cache(pfn_to_page(i), 0);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sg[0].length == 0);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/*
 * Flush and invalidate cache for scatterlist.
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);
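
/*
 * Usage sketch (illustrative only): a driver that rewrites part of a
 * mapped buffer hands just that sub-range back to the device by
 * flushing only the bytes it touched; "foo_dev", "buf", "dma", "off",
 * "data" and "len" are hypothetical names.
 *
 *	memcpy(buf + off, data, len);
 *	dma_sync_single_range_for_device(&foo_dev->dev, dma, off, len,
 *					 DMA_TO_DEVICE);
 *	... restart the device on that region ...
 */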

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
 * need to do any flushing here.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);