dma-mapping.h

#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
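/*
 * Every helper below dispatches through the struct dma_map_ops that
 * platform_dma_get_ops() selects for the device, so the same API is
 * backed by whichever implementation the machine vector installed
 * (e.g. swiotlb or a hardware IOMMU).
 */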
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
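/*
 * Typical use of the coherent API, as a minimal sketch ("dev", "size",
 * "cpu_addr" and "bus_addr" are hypothetical driver locals):
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr = dma_alloc_coherent(dev, size, &bus_addr, GFP_KERNEL);
 *
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... CPU uses cpu_addr, the device is programmed with bus_addr ...
 *	dma_free_coherent(dev, size, cpu_addr, bus_addr);
 */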
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
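/*
 * A streaming mapping pairs each map with an unmap of the same size and
 * direction, and the returned handle must be checked with
 * dma_mapping_error() (defined below) before use.  Minimal sketch with
 * hypothetical "dev", "buf" and "len":
 *
 *	dma_addr_t daddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, daddr))
 *		return -ENOMEM;
 *	... device reads len bytes at daddr ...
 *	dma_unmap_single(dev, daddr, len, DMA_TO_DEVICE);
 */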
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
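/*
 * dma_map_sg() returns the number of entries actually mapped (possibly
 * fewer than nents if entries were coalesced) or 0 on failure; program
 * the device from sg_dma_address()/sg_dma_len() of the mapped entries
 * and unmap with the original nents.  Sketch with hypothetical "dev",
 * "sgl" and "nents":
 *
 *	struct scatterlist *sg;
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		... program sg_dma_address(sg), sg_dma_len(sg) ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */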
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sgl,
					  int nents,
					  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}
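/*
 * The sync calls transfer ownership of a streaming mapping without
 * unmapping it: sync *_for_cpu before the CPU touches the buffer,
 * sync *_for_device before handing it back to the device.
 */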
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}
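/*
 * Page mappings hit the same ops->map_page/unmap_page hooks as
 * dma_map_single() above, which is why dma_unmap_page() can simply
 * reuse dma_unmap_single().  Sketch with hypothetical "dev" and "page":
 *
 *	dma_addr_t daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					DMA_BIDIRECTIONAL);
 *
 *	if (dma_mapping_error(dev, daddr))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
 */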
/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)
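/*
 * Note that the range variants above fall through to the whole-mapping
 * sync entry points and drop the offset argument.
 */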
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}
static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
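/*
 * dma_set_mask() rejects (with -EIO) any mask that dma_supported()
 * refuses.  A probe-time sketch with a hypothetical "dev":
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */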
extern int dma_get_cache_alignment(void);
static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}
#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */