dma-mapping.h

/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc
 * (the pci and vio busses).
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/bug.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
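
/*
 * Illustrative sketch of the "allocate the space normally and manage the
 * cache by hand" alternative mentioned above.  The 2048-byte buffer and
 * the DMA_FROM_DEVICE direction are invented driver details, not part of
 * this header; __dma_sync(), kmalloc() and virt_to_bus() are the real calls:
 *
 *	void *buf = kmalloc(2048, GFP_KERNEL);
 *	dma_addr_t bus;
 *
 *	if (buf != NULL) {
 *		__dma_sync(buf, 2048, DMA_FROM_DEVICE);
 *		bus = virt_to_bus(buf);
 *	}
 *
 * After the device has written into the buffer, the CPU side must be
 * synced again (with __dma_sync() or the dma_sync_* helpers below)
 * before reading it.
 */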
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		do { } while (0)
#define __dma_sync(addr, size, rw)		do { } while (0)
#define __dma_sync_page(pg, off, sz, rw)	do { } while (0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */
#ifdef CONFIG_PPC64

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction);
#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

/* We do nothing. */
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

/* We do nothing. */
#define dma_unmap_page(dev, handle, size, dir)	do { } while (0)

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg->page);
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

#endif /* CONFIG_PPC64 */
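
/*
 * Usage sketch for the coherent allocator (illustrative only; "dev" is the
 * driver's struct device, and the 256-byte descriptor ring with the
 * "ring"/"ring_bus" names is invented for the example):
 *
 *	void *ring;
 *	dma_addr_t ring_bus;
 *
 *	ring = dma_alloc_coherent(dev, 256, &ring_bus, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... hand ring_bus to the device, touch "ring" from the CPU ...
 *	dma_free_coherent(dev, 256, ring, ring_bus);
 */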
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
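
/*
 * Streaming-mapping sketch (illustrative; "data" and the 1500-byte length
 * stand in for a real driver's buffer):
 *
 *	dma_addr_t bus = dma_map_single(dev, data, 1500, DMA_TO_DEVICE);
 *
 *	... device reads the buffer through "bus" ...
 *	dma_unmap_single(dev, bus, 1500, DMA_TO_DEVICE);
 *
 * If the CPU must inspect a DMA_FROM_DEVICE buffer while it stays mapped,
 * bracket the access with the two sync calls defined above:
 *
 *	dma_sync_single_for_cpu(dev, bus, 1500, DMA_FROM_DEVICE);
 *	... CPU reads what the device wrote ...
 *	dma_sync_single_for_device(dev, bus, 1500, DMA_FROM_DEVICE);
 */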
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
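
/*
 * Scatterlist sketch (illustrative; "sglist" and NENTS are assumed to be
 * set up by the caller).  Per the generic DMA API, the sync and unmap
 * calls take the original entry count, while the value returned by
 * dma_map_sg() says how many entries to program into the device:
 *
 *	int count = dma_map_sg(dev, sglist, NENTS, DMA_FROM_DEVICE);
 *
 *	... program "count" entries via sg_dma_address()/sg_dma_len() ...
 *	dma_sync_sg_for_cpu(dev, sglist, NENTS, DMA_FROM_DEVICE);
 *	... CPU inspects the data ...
 *	dma_unmap_sg(dev, sglist, NENTS, DMA_FROM_DEVICE);
 */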
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
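
/*
 * On ppc64 a failed mapping is reported as DMA_ERROR_CODE, so the result
 * of a map call should be checked before use (sketch; "ptr" and "len" are
 * the caller's buffer, and the error return is the caller's choice):
 *
 *	dma_addr_t bus = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 */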
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d)	(0)
#else
#define dma_is_consistent(d)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
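
/*
 * Alignment sketch: rounding a length up to dma_get_cache_alignment()
 * keeps a non-coherent buffer from sharing a cache line with unrelated
 * data ("len" and "buf" are just example caller-side names):
 *
 *	size_t align = dma_get_cache_alignment();
 *	size_t aligned = (len + align - 1) & ~(align - 1);
 *
 *	buf = kmalloc(aligned, GFP_KERNEL);
 */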
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(vaddr, size, (int)direction);
}
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*dac_dma_supported)(struct device *dev, u64 mask);
};
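
/*
 * A bus that plugs into this abstraction fills in one of these tables.
 * Hypothetical sketch only; the "example_*" functions do not exist
 * anywhere, they merely show the expected shape:
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.alloc_coherent	= example_alloc_coherent,
 *		.free_coherent	= example_free_coherent,
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *		.dma_supported	= example_dma_supported,
 *	};
 */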
#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */