dma-mapping_64.h 4.2 KB

#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1
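
/*
 * IOMMU interface for x86-64.  Most calls here are thin wrappers that
 * dispatch through the dma_ops table, which is set up at boot by the
 * active backend (GART IOMMU, swiotlb, or nommu).
 */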

extern dma_addr_t bad_dma_address;
extern const struct dma_mapping_ops *dma_ops;
extern int iommu_merge;
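
/*
 * Note: this listing omits the declaration of struct dma_mapping_ops.
 * As a rough sketch, reconstructed only from the call sites below (the
 * real declaration also carries coherent-allocation and capability
 * hooks), the ops used here look approximately like:
 *
 *      struct dma_mapping_ops {
 *              int        (*mapping_error)(dma_addr_t dma_addr);
 *              dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
 *                                       size_t size, int direction);
 *              void       (*unmap_single)(struct device *dev, dma_addr_t addr,
 *                                         size_t size, int direction);
 *              int        (*map_sg)(struct device *hwdev,
 *                                   struct scatterlist *sg,
 *                                   int nents, int direction);
 *              void       (*unmap_sg)(struct device *hwdev,
 *                                     struct scatterlist *sg,
 *                                     int nents, int direction);
 *              ... plus the optional sync_single_*, sync_single_range_*
 *              and sync_sg_* hooks that are checked for NULL before use ...
 *      };
 */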

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                              dma_addr_t dma_handle);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_single(dev, addr, size, direction);
}
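
/*
 * x86-64 has no highmem: every page sits in the kernel direct mapping,
 * so the page-based helpers can reuse the single-buffer path via
 * page_address().
 */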

#define dma_map_page(dev, page, offset, size, dir) \
        dma_map_single((dev), page_address(page) + (offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_cpu) {
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                                   size, direction);
        }
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_device) {
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        }
        flush_write_buffers();
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
        return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);
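
/*
 * DMA on x86-64 is cache-coherent (see dma_is_consistent() above), so
 * dma_cache_sync() only has to drain the CPU write buffers; no cache
 * lines need to be flushed.
 */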

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */
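
Usage sketch (not part of the header): a driver built against this API maps a
kernel buffer for a single streaming transfer, checks the returned handle with
dma_mapping_error(), and unmaps it once the device has finished. The function
name, device pointer, and buffer below are hypothetical placeholders; a real
driver obtains them from its probe path and includes <linux/dma-mapping.h>,
<linux/device.h>, and <linux/errno.h>.

static int example_start_dma(struct device *dev, void *buf, size_t len)
{
        dma_addr_t bus_addr;

        /* Map the buffer for a CPU -> device transfer. */
        bus_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(bus_addr))
                return -EIO;

        /* ... hand bus_addr to the device and run the transfer ... */

        /* Release the mapping once the device is done with the buffer. */
        dma_unmap_single(dev, bus_addr, len, DMA_TO_DEVICE);
        return 0;
}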