dma-mapping.h

#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

struct pci_dev;

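/*
 * Backing allocator for coherent DMA memory, declared here and defined
 * elsewhere in the SH port.  The wrappers below pass NULL as the pci_dev.
 */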
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
			      dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
			    void *vaddr, dma_addr_t dma_handle);

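/* Every DMA mask is claimed to be supported on this port. */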
#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

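/*
 * Coherent allocations are forwarded to the consistent_*() helpers; the
 * device argument is ignored.
 */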
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return consistent_alloc(NULL, size, dma_handle);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	consistent_free(NULL, size, vaddr, dma_handle);
}

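/* Write back and invalidate the data cache over a streaming DMA buffer. */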
static inline void dma_cache_sync(void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	dma_cache_wback_inv((unsigned long)vaddr, size);
}

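/*
 * Map a single buffer.  PCI devices are treated as cache-coherent unless
 * CONFIG_SH_PCIDMA_NONCOHERENT is set, so they skip the cache flush;
 * everything else is flushed before the bus address is returned.
 * Unmapping needs no cache maintenance and is a no-op.
 */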
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(ptr, size, dir);

	return virt_to_bus(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

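/*
 * Map a scatterlist: each entry is flushed on non-coherent configurations
 * and its bus address is recorded in sg[i].dma_address.
 */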
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

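/* Page mappings reuse the single-buffer path via the page's kernel address. */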
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

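/*
 * Synchronize a mapped region: a no-op for coherent PCI, otherwise the
 * data cache is written back and invalidated over the region.
 */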
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir);
}

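/* Synchronize every scatterlist entry and refresh its bus address. */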
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

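/*
 * The directional _for_cpu/_for_device variants all fall back to the same
 * flush-based sync helpers above.
 */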
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle, size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

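/* A bus address of zero is treated as a mapping failure. */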
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

#endif /* __ASM_SH_DMA_MAPPING_H */