/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _BLACKFIN_DMA_MAPPING_H
#define _BLACKFIN_DMA_MAPPING_H

#include <asm/cacheflush.h>
struct scatterlist;

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                       dma_addr_t dma_handle);
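
/*
 * Usage sketch (added commentary, not from the original header; the
 * "mydev" device pointer is hypothetical): coherent memory needs no
 * explicit cache maintenance while it is in use:
 *
 *      dma_addr_t handle;
 *      void *buf = dma_alloc_coherent(mydev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *      if (!buf)
 *              return -ENOMEM;
 *      ... hand "handle" to the device, access "buf" from the CPU ...
 *      dma_free_coherent(mydev, PAGE_SIZE, buf, handle);
 */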

/*
 * Now for the API extensions over the pci_ one
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_supported(d, m)        (1)
#define dma_is_consistent(d, h)    (1)

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
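
/*
 * Usage sketch (added commentary, not from the original header): since
 * dma_supported() above is hard-wired to 1, any mask is accepted as long
 * as the device has a dma_mask pointer, e.g.:
 *
 *      if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 */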

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        /* mappings are 1:1 on Blackfin, so there is no error case to report */
        return 0;
}

extern void
__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);

static inline void
__dma_sync_inline(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_NONE:
                BUG();
        case DMA_TO_DEVICE:             /* writeback only */
                flush_dcache_range(addr, addr + size);
                break;
        case DMA_FROM_DEVICE:           /* invalidate only */
        case DMA_BIDIRECTIONAL:         /* flush and invalidate */
                /* Blackfin has no dedicated invalidate (it includes a flush) */
                invalidate_dcache_range(addr, addr + size);
                break;
        }
}

static inline void
_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        if (__builtin_constant_p(dir))
                __dma_sync_inline(addr, size, dir);
        else
                __dma_sync(addr, size, dir);
}
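
/*
 * Note (added commentary, not from the original header): when "dir" is a
 * compile-time constant, __builtin_constant_p() lets the compiler fold the
 * switch in __dma_sync_inline() down to a single cache call at the call
 * site; a runtime-variable direction falls back to the out-of-line
 * __dma_sync() so the whole switch is not inlined everywhere.
 */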

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        _dma_sync((dma_addr_t)ptr, size, dir);
        return (dma_addr_t)ptr;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, dir);
}
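
/*
 * Usage sketch (added commentary; "dev", "buf" and "len" are
 * hypothetical): mappings are 1:1 on Blackfin, so dma_map_single() is
 * just a cache sync plus a cast, and unmapping has nothing to undo:
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... hand "handle" to the device and wait for the transfer ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */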

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
               enum dma_data_direction dir)
{
        dma_unmap_single(dev, dma_addr, size, dir);
}

extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                      enum dma_data_direction dir);

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
             int nhwentries, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));
}
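
/*
 * Usage sketch (added commentary; "sgl" and "nents" are hypothetical):
 * the scatterlist calls follow the same ownership pattern as the
 * single-buffer ones:
 *
 *      int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *      ... program the device with the "count" mapped entries ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */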

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir)
{
        _dma_sync(handle + offset, size, dir);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
                        enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
                           enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
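
/*
 * Usage sketch (added commentary; names hypothetical): to reuse a
 * streaming mapping without unmapping it, hand ownership back and forth
 * with the sync calls. Only the for_device side does real cache work
 * here; for_cpu is a no-op, presumably because the cache was already
 * cleaned or invalidated before the device owned the buffer:
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... CPU reads the data the device wrote ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      ... device may write the buffer again ...
 */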

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                    enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));
}

extern void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nents, enum dma_data_direction dir);

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        _dma_sync((dma_addr_t)vaddr, size, dir);
}

#endif /* _BLACKFIN_DMA_MAPPING_H */