dma-mapping.h

#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern const struct dma_map_ops *dma_ops;
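
/*
 * Each helper below is a thin inline wrapper that dispatches to the
 * dma_map_ops implementation installed in the global dma_ops pointer.
 */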

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}
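
/*
 * Streaming mappings: dma_map_single() converts a kernel virtual address
 * into a page + offset pair, so both it and dma_map_page() funnel into the
 * same ->map_page() hook; scatterlists go through ->map_sg().
 */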

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr & ~PAGE_MASK, size,
				 direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_ops->unmap_page(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_ops->map_page(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_ops->unmap_page(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_ops->map_sg(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction)
{
	dma_ops->unmap_sg(dev, sg, nents, direction, NULL);
}
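
/*
 * The sync helpers hand ownership of a streaming mapping back and forth
 * between the CPU and the device.  The ->sync_*_for_device() hooks are
 * optional, so they are only called when the backend provides them.
 */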

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction direction)
{
	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction direction)
{
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(dev, dma_handle, size,
						direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction direction)
{
	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction direction)
{
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, dir);
}
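
/*
 * Mapping failures are signalled by returning the DMA_ERROR_CODE sentinel
 * from the map routines; callers check for it with dma_mapping_error().
 */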

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe
	 */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#endif
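
For context, a minimal sketch of how a driver would typically use this API to stream a buffer to a device. The function name, device pointer, buffer, and length (example_start_tx, dev, buf, len) are hypothetical, and error handling is reduced to the dma_mapping_error() check this header defines.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical example: map a driver-owned buffer for a device write. */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Hand the buffer to the device; wraps dma_ops->map_page() above. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	/* Release the mapping once the device is done with the buffer. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}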