/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc
 * (the pci and vio busses).
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>

#include <asm/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		do { } while (0)
#define __dma_sync(addr, size, rw)		do { } while (0)
#define __dma_sync_page(pg, off, sz, rw)	do { } while (0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */
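
/*
 * Illustrative sketch (not part of this header's API, compiled out): how a
 * driver on a non-snooping core might use the primitives above.  The ring
 * and buffer names are hypothetical.  __dma_alloc_coherent() returns
 * uncached memory, so only streaming buffers obtained some other way need
 * an explicit __dma_sync() before the device sees them.
 */
#if 0
static void *example_ring;		/* hypothetical descriptor ring */
static dma_addr_t example_ring_bus;

static int example_setup(void)
{
	/* Consistent memory: no further cache management required. */
	example_ring = __dma_alloc_coherent(PAGE_SIZE, &example_ring_bus,
					    GFP_KERNEL);
	return example_ring ? 0 : -ENOMEM;
}

static void example_tx(void *buf, size_t len)
{
	/* Streaming buffer: flush CPU caches before the device reads it. */
	__dma_sync(buf, len, DMA_TO_DEVICE);
}
#endif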
#ifdef CONFIG_PPC64

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
			      dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
				 size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
			     size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
			   size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			 int nhwentries, enum dma_data_direction direction);
#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));
	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}
static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

/* We do nothing. */
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

/* We do nothing. */
#define dma_unmap_page(dev, handle, size, dir)	do { } while (0)
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg->page);
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

#endif /* CONFIG_PPC64 */
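
/*
 * Illustrative sketch (compiled out): the streaming mapping pattern for a
 * single buffer.  Device, buffer and length names are hypothetical; on the
 * non-IOMMU (!CONFIG_PPC64) build dma_unmap_single() is a no-op, but it is
 * still called so the same code stays correct on IOMMU-backed platforms.
 */
#if 0
static void example_single_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus;

	bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	/* ... program the device with 'bus', wait for completion ... */
	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
}
#endif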
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
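
/*
 * Illustrative sketch (compiled out): handing a long-lived streaming mapping
 * back and forth between CPU and device.  The names below are hypothetical.
 */
#if 0
static void example_rx_poll(struct device *dev, void *buf, dma_addr_t bus,
			    size_t len)
{
	/* Make the device's writes visible before the CPU reads 'buf'. */
	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
	/* ... inspect the received data in 'buf' ... */

	/* Give the buffer back to the device for the next transfer. */
	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
}
#endif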
static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nents,
				       enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nents,
					  enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
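
/*
 * Illustrative sketch (compiled out): mapping a small scatterlist and
 * re-syncing it for the CPU after the device has written into it.  The
 * setup is hypothetical and assumes the old sg->page/offset/length layout
 * used by this header.
 */
#if 0
static void example_sg_rx(struct device *dev, struct page *pg0,
			  struct page *pg1)
{
	struct scatterlist sg[2];
	int nents;

	memset(sg, 0, sizeof(sg));
	sg[0].page = pg0;
	sg[0].length = PAGE_SIZE;
	sg[1].page = pg1;
	sg[1].length = PAGE_SIZE;

	nents = dma_map_sg(dev, sg, 2, DMA_FROM_DEVICE);
	/* ... program the device from sg[i].dma_address / sg[i].length ... */
	dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);
	/* ... CPU reads the received data ... */
	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
}
#endif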
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d)	(0)
#else
#define dma_is_consistent(d)	(1)
#endif
static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
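
/*
 * Illustrative sketch (compiled out): using dma_get_cache_alignment() to
 * space out DMA objects so they never share a cache line with unrelated
 * data, which matters for streaming DMA on non-coherent parts.  The helper
 * name is hypothetical.
 */
#if 0
static size_t example_dma_stride(size_t obj_size)
{
	size_t align = dma_get_cache_alignment();

	/* Round up so consecutive objects land in separate cache lines. */
	return (obj_size + align - 1) & ~(align - 1);
}
#endif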
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
static inline void dma_cache_sync(void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*dac_dma_supported)(struct device *dev, u64 mask);
};
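
/*
 * Illustrative sketch (compiled out): how a hypothetical bus might populate
 * struct dma_mapping_ops.  The example_* callback names are made up and not
 * defined here; the real PCI/VIO/iommu implementations live in platform code.
 */
#if 0
static struct dma_mapping_ops example_dma_ops = {
	.alloc_coherent		= example_alloc_coherent,
	.free_coherent		= example_free_coherent,
	.map_single		= example_map_single,
	.unmap_single		= example_unmap_single,
	.map_sg			= example_map_sg,
	.unmap_sg		= example_unmap_sg,
	.dma_supported		= example_dma_supported,
	.dac_dma_supported	= NULL,		/* no 64-bit DAC addressing */
};
#endif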
#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */