dma-mapping.h

#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

struct pci_dev;
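
/*
 * Arch-private helpers that back the coherent allocation API below;
 * presumably they hand out an uncached mapping, since no explicit cache
 * maintenance is ever done on coherent buffers. The pci_dev argument is
 * unused and NULL is passed.
 */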
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
                              dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
                            void *vaddr, dma_addr_t dma_handle);
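
/* Every mask is accepted: no DMA addressing restrictions on this platform. */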
#define dma_supported(dev, mask) (1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
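
/*
 * Coherent allocations are delegated to consistent_alloc()/consistent_free();
 * the device argument is ignored.
 */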
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle)
{
        consistent_free(NULL, size, vaddr, dma_handle);
}
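
/* Noncoherent allocations simply reuse the coherent paths. */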
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
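
/*
 * Write back and invalidate every cache line covering vaddr..vaddr+size:
 * ocbp (operand cache block purge) flushes one L1 line per iteration.
 * Both end points are rounded down to a line boundary, hence the
 * inclusive loop bound.
 */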
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                  enum dma_data_direction dir)
{
        unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
        unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;

        for (; s <= e; s += L1_CACHE_BYTES)
                asm volatile ("ocbp %0, 0" : : "r" (s));
}
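
/*
 * Streaming mappings are trivial: the DMA address is just the physical
 * address. On coherent PCI (CONFIG_PCI without CONFIG_SH_PCIDMA_NONCOHERENT)
 * no cache maintenance is needed; otherwise the buffer is purged before
 * being handed to the device.
 */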
static inline dma_addr_t dma_map_single(struct device *dev,
                                        void *ptr, size_t size,
                                        enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return virt_to_phys(ptr);
#endif
        dma_cache_sync(dev, ptr, size, dir);

        return virt_to_phys(ptr);
}

#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
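
/*
 * Scatterlist mapping: purge each segment when the bus is not coherent,
 * then record its physical address as the DMA address. Unmapping is a
 * no-op since there is nothing to undo.
 */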
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
                sg[i].dma_address = sg_phys(&sg[i]);
        }

        return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
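
/* Page mappings reuse dma_map_single() on the page's kernel virtual address. */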
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, dma_address, size, dir);
}
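
/*
 * Sync operations mirror the mapping paths: cache maintenance is skipped
 * on coherent PCI, otherwise the lines covering the buffer are purged.
 * The DMA handle is converted back to a kernel virtual address first.
 */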
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
                                         dma_addr_t dma_handle,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
        if (dev->bus == &pci_bus_type)
                return;
#endif
        dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
                dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
                sg[i].dma_address = sg_phys(&sg[i]);
        }
}
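
/*
 * A single purge both writes back and invalidates, so the for_cpu and
 * for_device variants can share one implementation.
 */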
static inline void dma_sync_single_for_cpu(struct device *dev,
                                           dma_addr_t dma_handle, size_t size,
                                           enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t dma_handle, size_t size,
                                              enum dma_data_direction dir)
{
        dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t dma_handle,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction direction)
{
        dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t dma_handle,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction direction)
{
        dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
                                       struct scatterlist *sg, int nelems,
                                       enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
                                          struct scatterlist *sg, int nelems,
                                          enum dma_data_direction dir)
{
        dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
        /*
         * Each processor family will define its own L1_CACHE_SHIFT,
         * L1_CACHE_BYTES wraps to this, so this is always safe.
         */
        return L1_CACHE_BYTES;
}
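
/* A DMA address of zero is treated as the (only) error value. */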
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */