dma-mapping.h

/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc,
 * covering the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
                            size_t size, int direction);
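
/*
 * Illustrative sketch (not part of the original header): the alternative
 * mentioned above -- allocate cacheable memory "normally" and keep it
 * consistent by hand with __dma_sync().  The helper name, the zeroing and
 * the DMA_TO_DEVICE direction are assumptions made for the example only.
 */
static inline void *example_alloc_and_flush(size_t size, gfp_t gfp)
{
        void *buf = (void *)__get_free_pages(gfp, get_order(size));

        if (buf != NULL) {
                memset(buf, 0, size);
                /* write back the CPU's cached copy before the device reads it */
                __dma_sync(buf, size, DMA_TO_DEVICE);
        }
        return buf;
}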

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		do { } while (0)
#define __dma_sync(addr, size, rw)		do { } while (0)
#define __dma_sync_page(pg, off, sz, rw)	do { } while (0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                              dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                                 size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
                             size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                           size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                      enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                         int nhwentries, enum dma_data_direction direction);

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
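
/*
 * Illustrative sketch (not part of the original header): a driver probe
 * routine would typically declare its addressing capability like this.
 * The helper name and the 32-bit mask value are assumptions made for the
 * example only.
 */
static inline int example_declare_32bit_dma(struct device *dev)
{
        /* fail the probe if 32-bit DMA cannot be supported */
        return dma_set_mask(dev, 0xffffffffULL);
}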

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle,
                                       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        return __dma_alloc_coherent(size, dma_handle, gfp);
#else
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
                gfp |= GFP_DMA;

        ret = (void *)__get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }

        return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
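
/*
 * Illustrative sketch (not part of the original header): the usual pairing
 * of the coherent allocator with its matching free.  The helper name and
 * the 4096-byte length are placeholders invented for the example.
 */
static inline void example_coherent_roundtrip(struct device *dev)
{
        dma_addr_t bus_addr;
        void *cpu_addr;

        cpu_addr = dma_alloc_coherent(dev, 4096, &bus_addr, GFP_KERNEL);
        if (cpu_addr == NULL)
                return;

        /* the CPU uses cpu_addr, the device is programmed with bus_addr */

        dma_free_coherent(dev, 4096, cpu_addr, bus_addr);
}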

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync(ptr, size, direction);

        return virt_to_bus(ptr);
}

/* We do nothing. */
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync_page(page, offset, size, direction);

        return page_to_bus(page) + offset;
}

/* We do nothing. */
#define dma_unmap_page(dev, handle, size, dir)	do { } while (0)

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                BUG_ON(!sg->page);
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
                sg->dma_address = page_to_bus(sg->page) + sg->offset;
        }

        return nents;
}

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sg, int nents,
                enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++)
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg, int nents,
                enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++)
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
}
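
/*
 * Illustrative sketch (not part of the original header): mapping a
 * scatterlist for a device-to-memory transfer.  "sg" and "nents" are
 * assumed to have been filled in by the caller; the helper name and the
 * DMA_FROM_DEVICE direction are example choices.
 */
static inline void example_sg_receive(struct device *dev,
                                      struct scatterlist *sg, int nents)
{
        int mapped = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);

        if (mapped == 0)
                return;

        /*
         * sg->dma_address of each mapped entry is what the device is
         * programmed with.  After the device has written the data:
         */
        dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);
        dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
}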

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
        return (dma_addr == DMA_ERROR_CODE);
#else
        return 0;
#endif
}
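
/*
 * Illustrative sketch (not part of the original header): the usual
 * streaming-DMA pattern for a single buffer, including the error check
 * that only ever fires on CONFIG_PPC64.  "buf", "len" and the helper
 * name are placeholders invented for the example.
 */
static inline int example_stream_to_device(struct device *dev,
                                           void *buf, size_t len)
{
        dma_addr_t bus_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(bus_addr))
                return -EIO;

        /* ... hand bus_addr to the device and wait for it to finish ... */

        dma_unmap_single(dev, bus_addr, len, DMA_TO_DEVICE);
        return 0;
}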

#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)

#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d)	(0)
#else
#define dma_is_consistent(d)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
        /* no easy way to get cache size on all processors, so return
         * the maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
#else
        /*
         * Each processor family will define its own L1_CACHE_SHIFT,
         * L1_CACHE_BYTES wraps to this, so this is always safe.
         */
        return L1_CACHE_BYTES;
#endif
}
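
/*
 * Illustrative sketch (not part of the original header): rounding a buffer
 * length up to the reported alignment so a streaming mapping never shares
 * a cache line with unrelated data.  The helper name and "len" are made up
 * for the example.
 */
static inline size_t example_dma_pad(size_t len)
{
        size_t align = dma_get_cache_alignment();

        /* alignment is always a power of two, so mask arithmetic is safe */
        return (len + align - 1) & ~(align - 1);
}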

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        /* just sync everything for now */
        dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        /* just sync everything for now */
        dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(vaddr, size, (int)direction);
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
        void *     (*alloc_coherent)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t flag);
        void       (*free_coherent)(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_single)(struct device *dev, void *ptr,
                        size_t size, enum dma_data_direction direction);
        void       (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
                        size_t size, enum dma_data_direction direction);
        int        (*map_sg)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction direction);
        void       (*unmap_sg)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction direction);
        int        (*dma_supported)(struct device *dev, u64 mask);
        int        (*dac_dma_supported)(struct device *dev, u64 mask);
};
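
/*
 * Illustrative sketch (not part of the original header): a bus layer would
 * fill one of these tables in with its own callbacks.  The "example_vio_*"
 * functions named below are hypothetical and exist only to show the shape
 * of the table, so the block is kept out of compilation.
 */
#if 0	/* example only, never compiled */
static struct dma_mapping_ops example_vio_dma_ops = {
        .alloc_coherent = example_vio_alloc_coherent,
        .free_coherent  = example_vio_free_coherent,
        .map_single     = example_vio_map_single,
        .unmap_single   = example_vio_unmap_single,
        .map_sg         = example_vio_map_sg,
        .unmap_sg       = example_vio_unmap_sg,
        .dma_supported  = example_vio_dma_supported,
};
#endif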

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */