dma-mapping.h — x86-64 DMA mapping interface (3.2 KB)
  1. #ifndef _X8664_DMA_MAPPING_H
  2. #define _X8664_DMA_MAPPING_H 1
  3. /*
  4. * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
  5. * documentation.
  6. */
  7. #include <linux/config.h>
  8. #include <asm/scatterlist.h>
  9. #include <asm/io.h>
  10. #include <asm/swiotlb.h>
  11. extern dma_addr_t bad_dma_address;
  12. #define dma_mapping_error(x) \
  13. (swiotlb ? swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address))
  14. void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
  15. unsigned gfp);
  16. void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
  17. dma_addr_t dma_handle);
  18. #ifdef CONFIG_GART_IOMMU
  19. extern dma_addr_t dma_map_single(struct device *hwdev, void *ptr, size_t size,
  20. int direction);
  21. extern void dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
  22. int direction);
  23. #else
  24. /* No IOMMU */
  25. static inline dma_addr_t dma_map_single(struct device *hwdev, void *ptr,
  26. size_t size, int direction)
  27. {
  28. dma_addr_t addr;
  29. if (direction == DMA_NONE)
  30. out_of_line_bug();
  31. addr = virt_to_bus(ptr);
  32. if ((addr+size) & ~*hwdev->dma_mask)
  33. out_of_line_bug();
  34. return addr;
  35. }
  36. static inline void dma_unmap_single(struct device *hwdev, dma_addr_t dma_addr,
  37. size_t size, int direction)
  38. {
  39. if (direction == DMA_NONE)
  40. out_of_line_bug();
  41. /* Nothing to do */
  42. }
  43. #endif
  44. #define dma_map_page(dev,page,offset,size,dir) \
  45. dma_map_single((dev), page_address(page)+(offset), (size), (dir))
  46. static inline void dma_sync_single_for_cpu(struct device *hwdev,
  47. dma_addr_t dma_handle,
  48. size_t size, int direction)
  49. {
  50. if (direction == DMA_NONE)
  51. out_of_line_bug();
  52. if (swiotlb)
  53. return swiotlb_sync_single_for_cpu(hwdev,dma_handle,size,direction);
  54. flush_write_buffers();
  55. }
  56. static inline void dma_sync_single_for_device(struct device *hwdev,
  57. dma_addr_t dma_handle,
  58. size_t size, int direction)
  59. {
  60. if (direction == DMA_NONE)
  61. out_of_line_bug();
  62. if (swiotlb)
  63. return swiotlb_sync_single_for_device(hwdev,dma_handle,size,direction);
  64. flush_write_buffers();
  65. }
  66. static inline void dma_sync_sg_for_cpu(struct device *hwdev,
  67. struct scatterlist *sg,
  68. int nelems, int direction)
  69. {
  70. if (direction == DMA_NONE)
  71. out_of_line_bug();
  72. if (swiotlb)
  73. return swiotlb_sync_sg_for_cpu(hwdev,sg,nelems,direction);
  74. flush_write_buffers();
  75. }
  76. static inline void dma_sync_sg_for_device(struct device *hwdev,
  77. struct scatterlist *sg,
  78. int nelems, int direction)
  79. {
  80. if (direction == DMA_NONE)
  81. out_of_line_bug();
  82. if (swiotlb)
  83. return swiotlb_sync_sg_for_device(hwdev,sg,nelems,direction);
  84. flush_write_buffers();
  85. }
  86. extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
  87. int nents, int direction);
  88. extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
  89. int nents, int direction);
  90. #define dma_unmap_page dma_unmap_single
  91. extern int dma_supported(struct device *hwdev, u64 mask);
  92. extern int dma_get_cache_alignment(void);
  93. #define dma_is_consistent(h) 1
  94. static inline int dma_set_mask(struct device *dev, u64 mask)
  95. {
  96. if (!dev->dma_mask || !dma_supported(dev, mask))
  97. return -EIO;
  98. *dev->dma_mask = mask;
  99. return 0;
  100. }
  101. static inline void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
  102. {
  103. flush_write_buffers();
  104. }
  105. #endif