/*
 * dma-mapping.h: generic device DMA API for sparc64, implemented in
 * terms of the existing PCI DMA mapping API.
 */
  1. #ifndef _ASM_SPARC64_DMA_MAPPING_H
  2. #define _ASM_SPARC64_DMA_MAPPING_H
  3. #ifdef CONFIG_PCI
  4. /* we implement the API below in terms of the existing PCI one,
  5. * so include it */
  6. #include <linux/pci.h>
  7. /* need struct page definitions */
  8. #include <linux/mm.h>
  9. static inline int
  10. dma_supported(struct device *dev, u64 mask)
  11. {
  12. BUG_ON(dev->bus != &pci_bus_type);
  13. return pci_dma_supported(to_pci_dev(dev), mask);
  14. }
  15. static inline int
  16. dma_set_mask(struct device *dev, u64 dma_mask)
  17. {
  18. BUG_ON(dev->bus != &pci_bus_type);
  19. return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
  20. }
  21. static inline void *
  22. dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
  23. gfp_t flag)
  24. {
  25. BUG_ON(dev->bus != &pci_bus_type);
  26. return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
  27. }
  28. static inline void
  29. dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
  30. dma_addr_t dma_handle)
  31. {
  32. BUG_ON(dev->bus != &pci_bus_type);
  33. pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
  34. }
  35. static inline dma_addr_t
  36. dma_map_single(struct device *dev, void *cpu_addr, size_t size,
  37. enum dma_data_direction direction)
  38. {
  39. BUG_ON(dev->bus != &pci_bus_type);
  40. return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
  41. }
  42. static inline void
  43. dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
  44. enum dma_data_direction direction)
  45. {
  46. BUG_ON(dev->bus != &pci_bus_type);
  47. pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
  48. }
  49. static inline dma_addr_t
  50. dma_map_page(struct device *dev, struct page *page,
  51. unsigned long offset, size_t size,
  52. enum dma_data_direction direction)
  53. {
  54. BUG_ON(dev->bus != &pci_bus_type);
  55. return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
  56. }
  57. static inline void
  58. dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
  59. enum dma_data_direction direction)
  60. {
  61. BUG_ON(dev->bus != &pci_bus_type);
  62. pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
  63. }
  64. static inline int
  65. dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  66. enum dma_data_direction direction)
  67. {
  68. BUG_ON(dev->bus != &pci_bus_type);
  69. return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
  70. }
  71. static inline void
  72. dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
  73. enum dma_data_direction direction)
  74. {
  75. BUG_ON(dev->bus != &pci_bus_type);
  76. pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
  77. }
  78. static inline void
  79. dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
  80. enum dma_data_direction direction)
  81. {
  82. BUG_ON(dev->bus != &pci_bus_type);
  83. pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
  84. size, (int)direction);
  85. }
  86. static inline void
  87. dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
  88. enum dma_data_direction direction)
  89. {
  90. BUG_ON(dev->bus != &pci_bus_type);
  91. pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
  92. size, (int)direction);
  93. }
  94. static inline void
  95. dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
  96. enum dma_data_direction direction)
  97. {
  98. BUG_ON(dev->bus != &pci_bus_type);
  99. pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
  100. }
  101. static inline void
  102. dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
  103. enum dma_data_direction direction)
  104. {
  105. BUG_ON(dev->bus != &pci_bus_type);
  106. pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
  107. }
/*
 * dma_mapping_error() - check whether an address returned by a dma_map_*
 * call is an error value.  Thin wrapper around pci_dma_mapping_error();
 * note it takes no struct device, unlike the other wrappers here.
 */
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return pci_dma_mapping_error(dma_addr);
}
  113. #else
  114. struct device;
/*
 * !CONFIG_PCI stub: there is no non-PCI DMA path on this configuration,
 * so any caller reaching this is a bug.
 */
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	BUG();
	return NULL;	/* unreachable; keeps the non-void signature happy */
}
/* !CONFIG_PCI stub: no non-PCI DMA path exists, so any call is a bug. */
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	BUG();
}
/* !CONFIG_PCI stub: no non-PCI DMA path exists, so any call is a bug. */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG();
}
/* !CONFIG_PCI stub: no non-PCI DMA path exists, so any call is a bug. */
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG();
}
  138. #endif /* PCI */
/* Now for the API extensions over the pci_ one */

/* No separate non-coherent allocator: coherent memory serves both roles. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
/* DMA memory obtained here is always consistent on this platform. */
#define dma_is_consistent(d) (1)
  143. static inline int
  144. dma_get_cache_alignment(void)
  145. {
  146. /* no easy way to get cache size on all processors, so return
  147. * the maximum possible, to be safe */
  148. return (1 << INTERNODE_CACHE_SHIFT);
  149. }
  150. static inline void
  151. dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
  152. unsigned long offset, size_t size,
  153. enum dma_data_direction direction)
  154. {
  155. /* just sync everything, that's all the pci API can do */
  156. dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
  157. }
  158. static inline void
  159. dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
  160. unsigned long offset, size_t size,
  161. enum dma_data_direction direction)
  162. {
  163. /* just sync everything, that's all the pci API can do */
  164. dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
  165. }
/*
 * dma_cache_sync() - unimplemented on sparc64.
 *
 * Could be defined in terms of the dma_cache ... operations, but if you
 * hit this on a platform, you should convert the platform to using the
 * generic device DMA API instead.  Deliberately a hard bug, not a no-op.
 */
static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
}
  175. #endif /* _ASM_SPARC64_DMA_MAPPING_H */