dma-mapping_64.h

#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt
 * for documentation.
 */

#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;

/* dma_ops (a struct dma_mapping_ops vector filled in by the gart, swiotlb
   or nommu implementation) is declared in the part of this header not
   shown in this excerpt. */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);
	return (dma_addr == bad_dma_address);
}
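
/*
 * Illustrative check, not part of this header: a streaming mapping should
 * be validated before use ("dev", "ptr" and "len" are hypothetical names):
 *
 *	dma_addr_t dma = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dma))
 *		return -EIO;
 */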
/* x86-64 has no separate noncoherent allocator; fall back to coherent. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle);
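
/*
 * Usage sketch, illustrative only: a driver allocating a coherent
 * descriptor ring ("pdev" and "ring" are hypothetical names):
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */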
/* Pages are mapped through their kernel virtual address. */
#define dma_map_page(dev, page, offset, size, dir) \
	dma_map_single((dev), page_address(page) + (offset), (size), (dir))
#define dma_unmap_page dma_unmap_single
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}
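
/*
 * Usage sketch, illustrative only: the for_cpu/for_device pair hands a
 * streaming buffer back and forth between CPU and device ("dev", "buf_dma"
 * and "len" are hypothetical names):
 *
 *	... device DMAs into the buffer ...
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	... CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	... device may DMA into it again ...
 */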
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
						   offset, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}
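
/*
 * Usage sketch, illustrative only: scatterlists are synced with the same
 * nents that was passed to dma_map_sg(), not its return value ("dev",
 * "sglist" and "nents" are hypothetical names):
 *
 *	if (dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE) == 0)
 *		return -EIO;
 *	... device fills the buffers ...
 *	dma_sync_sg_for_cpu(dev, sglist, nents, DMA_FROM_DEVICE);
 */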
extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}
/* Coherent allocations are always consistent on x86-64. */
#define dma_is_consistent(d, h)	1

extern int dma_set_mask(struct device *dev, u64 mask);

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */