dma-mapping.h

#ifndef _ASM_SPARC64_DMA_MAPPING_H
#define _ASM_SPARC64_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>

#define DMA_ERROR_CODE (~(dma_addr_t)0x0)

struct dma_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			void *cpu_addr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
			size_t size,
			enum dma_data_direction direction);
	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
			size_t size,
			enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			int nhwentries,
			enum dma_data_direction direction);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nelems,
			enum dma_data_direction direction);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nelems,
			enum dma_data_direction direction);
};
extern const struct dma_ops *dma_ops;

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
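
/*
 * Example: before setting up any mapping, a hypothetical driver probe
 * routine would typically negotiate the device's DMA addressing
 * capability.  "dev" is the driver's struct device, and the
 * DMA_64BIT_MASK/DMA_32BIT_MASK constants are assumed to come from
 * <linux/dma-mapping.h>; this is a sketch, not part of this header.
 *
 *	if (dma_set_mask(dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;	(no usable DMA addressing mode)
 */
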
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
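
/*
 * Example: a hypothetical driver allocating a small descriptor ring that
 * the CPU and the device access concurrently.  "dev" and "RING_BYTES"
 * are assumptions of this sketch, not part of the API.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	(program "ring_dma" into the device, touch "ring" from the CPU)
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
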
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size,
		enum dma_data_direction direction)
{
	return dma_ops->map_single(dev, cpu_addr, size, direction);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size,
		enum dma_data_direction direction)
{
	dma_ops->unmap_single(dev, dma_addr, size, direction);
}
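
/*
 * Example: a hypothetical transmit path mapping one kernel buffer for a
 * single DMA transfer to the device.  "dev", "buf" and "len" are assumed
 * to be supplied by the caller; dma_mapping_error() is defined further
 * down in this file.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	(hand "handle" to the device, wait for the transfer to complete)
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
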
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	return dma_ops->map_single(dev, page_address(page) + offset,
			size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size,
		enum dma_data_direction direction)
{
	dma_ops->unmap_single(dev, dma_address, size, direction);
}
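
/*
 * Example: the page-based variant, as a hypothetical receive path might
 * use it on a freshly allocated page.  "dev" and "page" are assumptions
 * of this sketch.
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
 *			DMA_FROM_DEVICE);
 *	(let the device fill the page)
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
 */
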
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction)
{
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction)
{
	dma_ops->unmap_sg(dev, sg, nents, direction);
}
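
/*
 * Example: mapping a caller-built scatterlist.  dma_map_sg() may return
 * fewer entries than it was given, so the device should be programmed
 * from the returned count via sg_dma_address()/sg_dma_len(), while the
 * later unmap still uses the original "nents".  "dev", "sg" and "nents"
 * are assumed to be set up by the caller in this sketch.
 *
 *	int i, mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *
 *	if (!mapped)
 *		return -ENOMEM;
 *	for (i = 0; i < mapped; i++)
 *		(program sg_dma_address(&sg[i]) and sg_dma_len(&sg[i]))
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */
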
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle,
		size_t size,
		enum dma_data_direction direction)
{
	dma_ops->sync_single_for_device(dev, dma_handle, size, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle,
		unsigned long offset,
		size_t size,
		enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle,
		unsigned long offset,
		size_t size,
		enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
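
/*
 * Example: letting the CPU look at a streaming mapping while it stays
 * mapped, e.g. peeking at a receive buffer the device may refill.
 * "dev", "handle" and "len" are assumed to come from an earlier
 * dma_map_single() call in this hypothetical sketch.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	(the CPU may now read the buffer contents)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	(ownership is back with the device)
 */
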
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems,
		enum dma_data_direction direction)
{
	dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

#endif /* _ASM_SPARC64_DMA_MAPPING_H */