dma-mapping.h
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
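/*
 * DMA_ERROR_CODE above (all bits set) is the sentinel a failed mapping
 * operation hands back; drivers should test results with
 * dma_mapping_error() below rather than comparing against the constant
 * directly.
 */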
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)           (1)
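/*
 * Every generic DMA API entry point below dispatches through the dma_ops
 * table that follows; the architecture's platform/bus setup code provides
 * the implementation and points dma_ops at it.
 */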
struct dma_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t dma_handle);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction direction);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
			   size_t size,
			   enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nhwentries,
			 enum dma_data_direction direction);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction direction);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction direction);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
				int nelems,
				enum dma_data_direction direction);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
};
extern const struct dma_ops *dma_ops;
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
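/*
 * dma_map_single() below takes a kernel virtual address and splits it into
 * the backing page plus the offset within that page, so the single
 * map_page hook can serve both dma_map_single() and dma_map_page().
 */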
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
				 direction);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_ops->unmap_page(dev, dma_addr, size, direction);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_ops->map_page(dev, page, offset, size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_ops->unmap_page(dev, dma_address, size, direction);
}
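/*
 * dma_map_sg() below maps each scatterlist entry and returns the number of
 * DMA segments actually used, which may be smaller than nents if the
 * implementation coalesces adjacent entries (0 indicates failure).  The
 * device is programmed with the returned count, while dma_unmap_sg() is
 * called with the original nents.
 */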
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction)
{
	dma_ops->unmap_sg(dev, sg, nents, direction);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction direction)
{
	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
}
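/*
 * The *_for_device sync hooks are optional: an implementation that needs
 * no work to hand a buffer back to the device may leave them NULL, so the
 * wrappers below check the pointer before calling through it.
 */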
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction direction)
{
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(dev, dma_handle, size,
						direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction direction)
{
	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction direction)
{
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
}
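/*
 * The range variants below sync only part of a mapping; they simply offset
 * the DMA handle and reuse the whole-buffer sync operations above.
 */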
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						     dma_addr_t dma_handle,
						     unsigned long offset,
						     size_t size,
						     enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}
static inline int dma_get_cache_alignment(void)
{
	/*
	 * no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#endif
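/*
 * Usage sketch (illustrative only, not part of this header): a driver
 * performing a single streaming DMA transfer would typically map a buffer,
 * check for failure, and unmap it once the hardware is done.  "dev", "buf"
 * and "len" below are hypothetical driver-owned values.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *
 *	... program "handle" into the device and start the transfer ...
 *
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */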