dma-mapping_64.h

#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
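
/*
 * The excerpt above drops the #includes and the operations table that every
 * helper below dereferences through dma_ops.  A minimal sketch, assuming
 * only the hooks actually called in this file (the kernel's real
 * struct dma_mapping_ops also carries alloc_coherent/free_coherent and
 * other hooks); the include list is likewise an assumption:
 */
#include <linux/scatterlist.h>		/* struct scatterlist */
#include <asm/io.h>			/* flush_write_buffers() */
#include <asm/swiotlb.h>		/* software IOTLB fallback */

struct dma_mapping_ops {
	int	(*mapping_error)(dma_addr_t dma_addr);
	dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
				 size_t size, int direction);
	void	(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void	(*sync_single_for_cpu)(struct device *hwdev,
				       dma_addr_t dma_handle,
				       size_t size, int direction);
	void	(*sync_single_for_device)(struct device *hwdev,
					  dma_addr_t dma_handle,
					  size_t size, int direction);
	void	(*sync_single_range_for_cpu)(struct device *hwdev,
					     dma_addr_t dma_handle,
					     unsigned long offset,
					     size_t size, int direction);
	void	(*sync_single_range_for_device)(struct device *hwdev,
						dma_addr_t dma_handle,
						unsigned long offset,
						size_t size, int direction);
	void	(*sync_sg_for_cpu)(struct device *hwdev,
				   struct scatterlist *sg,
				   int nelems, int direction);
	void	(*sync_sg_for_device)(struct device *hwdev,
				      struct scatterlist *sg,
				      int nelems, int direction);
	int	(*map_sg)(struct device *hwdev, struct scatterlist *sg,
			  int nents, int direction);
	void	(*unmap_sg)(struct device *hwdev, struct scatterlist *sg,
			    int nents, int direction);
};

extern struct dma_mapping_ops *dma_ops;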

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}
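
/*
 * x86-64 DMA is cache-coherent (dma_is_consistent() below is always 1), so
 * the "noncoherent" allocation calls simply alias the coherent ones.
 */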
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle);

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_single(dev, addr, size, direction);
}
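
/*
 * dma_map_page() below expands to dma_map_single(), which is missing from
 * this excerpt.  A minimal sketch, assuming the same dma_ops dispatch
 * pattern as dma_unmap_single() above:
 */
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, ptr, size, direction);
}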

#define dma_map_page(dev, page, offset, size, dir) \
	dma_map_single((dev), page_address(page) + (offset), (size), (dir))

#define dma_unmap_page dma_unmap_single
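
/*
 * Streaming-mapping sync helpers: the *_for_cpu variants hand a mapped
 * buffer back to the CPU, the *_for_device variants hand it back to the
 * device.  Each call is forwarded to the active dma_ops implementation when
 * it provides the hook, and flush_write_buffers() is issued unconditionally.
 */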

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
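
/*
 * Scatter-gather mapping: map_sg/unmap_sg are mandatory dma_ops hooks and
 * are therefore called without a NULL check.
 */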

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	   int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
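
/*
 * Capability helpers: dma_supported()/dma_set_mask() query and set a
 * device's addressable DMA range; the cache alignment reported to drivers
 * is the CPU's CLFLUSH line size.
 */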
extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */
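
/*
 * Illustrative only, not part of the header: a minimal sketch of how a
 * driver consumes this streaming-DMA API for a device-to-memory transfer.
 * 'dev', 'buf' and 'len' are hypothetical driver-local names.
 */
#if 0
	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(bus))
		return -EIO;

	/* ... program the device to DMA into 'bus', wait for completion ... */

	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
	/* the CPU may now read 'buf' */
	dma_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
#endif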