dma-mapping.h

#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
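
/*
 * Illustrative sketch, not part of the original header: how a driver that
 * includes <linux/dma-mapping.h> might use the coherent allocator above for
 * a small descriptor ring.  The device pointer, the 4 KB ring size and the
 * register-programming step are assumptions made up for this example.
 */
static inline void *example_alloc_ring(struct device *dev, dma_addr_t *ring_dma)
{
	/* memory visible to both CPU and device without explicit sync calls */
	void *ring = dma_alloc_coherent(dev, 4096, ring_dma, GFP_KERNEL);

	if (!ring)
		return NULL;
	/* ... write *ring_dma into the (hypothetical) ring-base register ... */
	return ring;
}

static inline void example_free_ring(struct device *dev, void *ring,
				     dma_addr_t ring_dma)
{
	dma_free_coherent(dev, 4096, ring, ring_dma);
}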
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
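
/*
 * Illustrative sketch, not part of the original header (it belongs in driver
 * code that includes <linux/dma-mapping.h>): a streaming mapping of a
 * kmalloc()'d buffer for one device-bound transfer.  The buffer, length and
 * "hand to hardware" step are assumptions for illustration.
 */
static inline int example_tx_one(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* never hand a failed mapping to hardware */
	/* ... give "handle" and "len" to the device, wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}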
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
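
/*
 * Illustrative sketch, not part of the original header: mapping a
 * scatterlist from driver code.  dma_map_sg() may coalesce entries, so the
 * device is programmed with the returned count, while dma_unmap_sg() still
 * takes the original nents.  The descriptor array is hypothetical.
 */
static inline int example_map_sglist(struct device *dev,
				     struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (count == 0)
		return -EIO;

	for_each_sg(sgl, sg, count, i) {
		/* e.g. desc[i].addr = sg_dma_address(sg);
		 *      desc[i].len  = sg_dma_len(sg); */
	}

	/* ... run the transfer ...; then unmap with the original nents */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}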
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sgl,
					  int nents,
					  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}
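
/*
 * Illustrative sketch, not part of the original header: reading a buffer
 * that stays mapped for streaming DMA.  Ownership is handed to the CPU and
 * back with the sync calls; the packet-header inspection is hypothetical.
 */
static inline void example_peek_rx(struct device *dev, dma_addr_t handle,
				   void *cpu_buf, size_t len)
{
	/* give the buffer to the CPU before reading it */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect cpu_buf, e.g. look at a packet header ... */

	/* return ownership to the device so it may DMA into it again */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}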
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}
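
/*
 * Illustrative sketch, not part of the original header: mapping a fragment
 * of a page for the device to read, with the failure check that every
 * streaming mapping needs.  Returning 0 for "no mapping" is a convention
 * invented for this example only.
 */
static inline dma_addr_t example_map_fragment(struct device *dev,
					      struct page *page,
					      size_t offset, size_t len)
{
	dma_addr_t handle = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return 0;	/* caller treats 0 as "mapping failed" */
	return handle;
}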
/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
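
/*
 * Illustrative sketch, not part of the original header: negotiating the DMA
 * mask at probe time.  Falling back from 64-bit to 32-bit addressing is a
 * common driver pattern; the warning text is made up for this example.
 */
static inline int example_setup_dma_mask(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
		return 0;		/* full 64-bit DMA addressing */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)) == 0)
		return 0;		/* fall back to 32-bit DMA addressing */
	dev_warn(dev, "no usable DMA addressing mode\n");
	return -EIO;
}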
extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */