/* include/linux/dma-mapping.h */
  1. #ifndef _LINUX_DMA_MAPPING_H
  2. #define _LINUX_DMA_MAPPING_H
  3. #include <linux/device.h>
  4. #include <linux/err.h>
  5. /* These definitions mirror those in pci.h, so they can be used
  6. * interchangeably with their PCI_ counterparts */
  7. enum dma_data_direction {
  8. DMA_BIDIRECTIONAL = 0,
  9. DMA_TO_DEVICE = 1,
  10. DMA_FROM_DEVICE = 2,
  11. DMA_NONE = 3,
  12. };
  13. #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
  14. /*
  15. * NOTE: do not use the below macros in new code and do not add new definitions
  16. * here.
  17. *
  18. * Instead, just open-code DMA_BIT_MASK(n) within your driver
  19. */
  20. #define DMA_64BIT_MASK DMA_BIT_MASK(64)
  21. #define DMA_48BIT_MASK DMA_BIT_MASK(48)
  22. #define DMA_47BIT_MASK DMA_BIT_MASK(47)
  23. #define DMA_40BIT_MASK DMA_BIT_MASK(40)
  24. #define DMA_39BIT_MASK DMA_BIT_MASK(39)
  25. #define DMA_35BIT_MASK DMA_BIT_MASK(35)
  26. #define DMA_32BIT_MASK DMA_BIT_MASK(32)
  27. #define DMA_31BIT_MASK DMA_BIT_MASK(31)
  28. #define DMA_30BIT_MASK DMA_BIT_MASK(30)
  29. #define DMA_29BIT_MASK DMA_BIT_MASK(29)
  30. #define DMA_28BIT_MASK DMA_BIT_MASK(28)
  31. #define DMA_24BIT_MASK DMA_BIT_MASK(24)
  32. #define DMA_MASK_NONE 0x0ULL
  33. static inline int valid_dma_direction(int dma_direction)
  34. {
  35. return ((dma_direction == DMA_BIDIRECTIONAL) ||
  36. (dma_direction == DMA_TO_DEVICE) ||
  37. (dma_direction == DMA_FROM_DEVICE));
  38. }
  39. static inline int is_device_dma_capable(struct device *dev)
  40. {
  41. return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
  42. }
  43. static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
  44. {
  45. return addr + size <= mask;
  46. }
  47. #ifdef CONFIG_HAS_DMA
  48. #include <asm/dma-mapping.h>
  49. #else
  50. #include <asm-generic/dma-mapping-broken.h>
  51. #endif
  52. /* Backwards compat, remove in 2.7.x */
  53. #define dma_sync_single dma_sync_single_for_cpu
  54. #define dma_sync_sg dma_sync_sg_for_cpu
  55. static inline u64 dma_get_mask(struct device *dev)
  56. {
  57. if (dev && dev->dma_mask && *dev->dma_mask)
  58. return *dev->dma_mask;
  59. return DMA_32BIT_MASK;
  60. }
  61. extern u64 dma_get_required_mask(struct device *dev);
  62. static inline unsigned int dma_get_max_seg_size(struct device *dev)
  63. {
  64. return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
  65. }
  66. static inline unsigned int dma_set_max_seg_size(struct device *dev,
  67. unsigned int size)
  68. {
  69. if (dev->dma_parms) {
  70. dev->dma_parms->max_segment_size = size;
  71. return 0;
  72. } else
  73. return -EIO;
  74. }
  75. static inline unsigned long dma_get_seg_boundary(struct device *dev)
  76. {
  77. return dev->dma_parms ?
  78. dev->dma_parms->segment_boundary_mask : 0xffffffff;
  79. }
  80. static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
  81. {
  82. if (dev->dma_parms) {
  83. dev->dma_parms->segment_boundary_mask = mask;
  84. return 0;
  85. } else
  86. return -EIO;
  87. }
  88. /* flags for the coherent memory api */
  89. #define DMA_MEMORY_MAP 0x01
  90. #define DMA_MEMORY_IO 0x02
  91. #define DMA_MEMORY_INCLUDES_CHILDREN 0x04
  92. #define DMA_MEMORY_EXCLUSIVE 0x08
  93. #ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
  94. static inline int
  95. dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
  96. dma_addr_t device_addr, size_t size, int flags)
  97. {
  98. return 0;
  99. }
  100. static inline void
  101. dma_release_declared_memory(struct device *dev)
  102. {
  103. }
  104. static inline void *
  105. dma_mark_declared_memory_occupied(struct device *dev,
  106. dma_addr_t device_addr, size_t size)
  107. {
  108. return ERR_PTR(-EBUSY);
  109. }
  110. #endif
  111. /*
  112. * Managed DMA API
  113. */
  114. extern void *dmam_alloc_coherent(struct device *dev, size_t size,
  115. dma_addr_t *dma_handle, gfp_t gfp);
  116. extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
  117. dma_addr_t dma_handle);
  118. extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
  119. dma_addr_t *dma_handle, gfp_t gfp);
  120. extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
  121. dma_addr_t dma_handle);
  122. #ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
  123. extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
  124. dma_addr_t device_addr, size_t size,
  125. int flags);
  126. extern void dmam_release_declared_memory(struct device *dev);
  127. #else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
  128. static inline int dmam_declare_coherent_memory(struct device *dev,
  129. dma_addr_t bus_addr, dma_addr_t device_addr,
  130. size_t size, gfp_t gfp)
  131. {
  132. return 0;
  133. }
  134. static inline void dmam_release_declared_memory(struct device *dev)
  135. {
  136. }
  137. #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
  138. #ifndef CONFIG_HAVE_DMA_ATTRS
  139. struct dma_attrs;
  140. #define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
  141. dma_map_single(dev, cpu_addr, size, dir)
  142. #define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
  143. dma_unmap_single(dev, dma_addr, size, dir)
  144. #define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
  145. dma_map_sg(dev, sgl, nents, dir)
  146. #define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
  147. dma_unmap_sg(dev, sgl, nents, dir)
  148. #endif /* CONFIG_HAVE_DMA_ATTRS */
  149. #endif