dma-mapping.h
/*
 * DMA Mapping glue for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H

#include <asm-generic/dma-coherent.h>
#include <asm/cacheflush.h>

#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
/*
 * The dma_map_* APIs take CPU addresses, i.e. kernel logical addresses in the
 * untranslated (0x8000_0000 based) address space. The DMA address (bus
 * address) ideally needs to be 0x0000_0000 based, hence these glue routines.
 * However, given that intermediate bus bridges can ignore the high bit, we
 * can get away with these routines being no-ops.
 * If a platform/device comes up which strictly requires a 0 based bus address
 * (e.g. the AHB-PCI bridge on the Angel4 board), it can provide its own
 * versions.
 */
#define plat_dma_addr_to_kernel(dev, addr)	((unsigned long)(addr))
#define plat_kernel_addr_to_dma(dev, ptr)	((dma_addr_t)(ptr))

#else
#include <plat/dma_addr.h>
#endif
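
/*
 * Buffer allocation APIs: the coherent variants return memory the device and
 * CPU can share without explicit cache maintenance; the noncoherent variants
 * return ordinary cached memory that callers must sync themselves.
 */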
void *dma_alloc_noncoherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle);

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
		       dma_addr_t dma_handle);
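
/*
 * Illustrative sketch (not part of this header): a driver would typically
 * allocate a long-lived, device-visible buffer like so. RING_SZ and the
 * error handling are hypothetical driver-side details.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SZ, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand "ring_dma" to the device, access "ring" from the CPU, then:
 *	dma_free_coherent(dev, RING_SZ, ring, ring_dma);
 */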

/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size);
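
/*
 * No arch-specific handling is needed for mmap()ing or building an sg_table
 * for a coherent buffer, so route both straight to the generic helpers.
 */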
#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)

/*
 * Streaming DMA Mapping API...
 * The CPU accesses the page via its normal paddr, so it needs to be
 * explicitly made consistent before each use.
 */
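
/*
 * Illustrative sketch (not part of this header): a one-shot transmit with the
 * streaming API. "buf" and "len" are hypothetical driver-side names.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... point the device at "handle" and start the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */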

static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
					   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:	/* device wrote memory: drop stale CPU lines */
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:	/* CPU wrote memory: push it out to the device */
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:	/* both directions: writeback + invalidate */
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
	}
}

void __arc_dma_cache_sync(unsigned long paddr, size_t size,
			  enum dma_data_direction dir);
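
/*
 * If "dir" is a compile-time constant, the inline switch above collapses to a
 * single cache op; otherwise fall back to the out-of-line helper.
 */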
#define _dma_cache_sync(addr, sz, dir)				\
do {								\
	if (__builtin_constant_p(dir))				\
		__inline_dma_cache_sync(addr, sz, dir);		\
	else							\
		__arc_dma_cache_sync(addr, sz, dir);		\
} while (0)
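
/*
 * Map a kernel-virtual buffer for streaming DMA: make the cache consistent
 * for "dir", then hand back the corresponding bus address.
 */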
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_cache_sync((unsigned long)cpu_addr, size, dir);
	return plat_kernel_addr_to_dma(dev, cpu_addr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		 size_t size, enum dma_data_direction dir)
{
	/* nothing to do here: cache handling was done at map time */
}
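
/*
 * Page-based variants: convert page + offset to a paddr and reuse the
 * single-buffer mapping above.
 */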
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + offset;

	return dma_map_single(dev, (void *)paddr, size, dir);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
	       size_t size, enum dma_data_direction dir)
{
	/* nothing to do, as above */
}
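
/*
 * Scatter-gather variants: each entry is mapped (and cache-synced)
 * individually via dma_map_page().
 */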
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	     int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
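
/*
 * sync_single helpers: "for_cpu" invalidates so the CPU sees what the device
 * wrote, "for_device" writes back so the device sees what the CPU wrote;
 * the caller-supplied direction is therefore not consulted.
 */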
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
			DMA_FROM_DEVICE);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
			DMA_TO_DEVICE);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
			size, DMA_FROM_DEVICE);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
			size, DMA_TO_DEVICE);
}
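
/*
 * sync_sg helpers: unlike the single-buffer variants above, these walk every
 * entry and honour the direction passed by the caller.
 */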
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		_dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		_dma_cache_sync((unsigned long)sg_virt(sg), sg->length, dir);
}

static inline int dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}
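
/* dma_map_*() above cannot fail, so there is never an error to report. */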
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#endif /* ASM_ARC_DMA_MAPPING_H */