dma-mapping_64.h

#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * x86-64 DMA mapping helpers. The boot-selected IOMMU implementation
 * (GART, swiotlb, or nommu) is reached through the dma_ops vector.
 */

extern dma_addr_t bad_dma_address;
extern int iommu_merge;

/* Returns non-zero if a dma_map_* call produced an unusable handle. */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}

/* x86-64 DMA is cache-coherent, so the noncoherent allocators are
 * simply the coherent ones. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern void *dma_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle);

/* dma_map_page() is built on dma_map_single() via the page's kernel
 * virtual address. */
#define dma_map_page(dev, page, offset, size, dir) \
	dma_map_single((dev), page_address(page) + (offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
						   offset, size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);

	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */
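
As a usage illustration (not part of the header): a minimal driver-side sketch of the two DMA styles this file serves, a coherent allocation for a long-lived descriptor ring and a streaming mapping of a receive page that is synced back to the CPU before it is read. The function and variable names (my_setup_dma, rx_page, ring) and the buffer sizes are invented for the example; note that in this kernel generation dma_mapping_error() takes only the DMA handle, not the device.

/*
 * Illustrative sketch only -- names and sizes are made up, error
 * handling is minimal.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int my_setup_dma(struct device *dev, struct page *rx_page)
{
	void *ring;
	dma_addr_t ring_dma, rx_dma;

	/* Long-lived, coherent descriptor ring: no explicit syncing needed. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* Streaming mapping of one receive page for device-to-memory DMA. */
	rx_dma = dma_map_page(dev, rx_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_dma)) {
		dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
		return -EIO;
	}

	/* ... program the device with ring_dma and rx_dma, let it DMA ... */

	/* Before the CPU reads the first 256 bytes the device wrote: */
	dma_sync_single_range_for_cpu(dev, rx_dma, 0, 256, DMA_FROM_DEVICE);

	/* Teardown once the device is quiesced. */
	dma_unmap_page(dev, rx_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}

The split matters because coherent buffers never need the dma_sync_* calls, while streaming mappings do; on x86-64 those sync helpers reduce, as the inlines above show, to an optional dma_ops hook plus flush_write_buffers().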