/* include/asm-i386/dma-mapping.h */

#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>

/*
 * DMA is cache-coherent on i386, so the "noncoherent" allocators can
 * simply alias the coherent ones.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, unsigned int __nocast flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);
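
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * header): allocate a coherent buffer, hand the returned bus address
 * to the device, and free the buffer on teardown.
 *
 *        dma_addr_t handle;
 *        void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *        if (!buf)
 *                return -ENOMEM;
 *        ... program the device with handle; the CPU uses buf directly ...
 *        dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */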

/*
 * Streaming mappings: with no IOMMU on i386 the bus address is just the
 * physical address, so mapping only needs to flush the CPU write buffers
 * to make any pending stores visible to the device.
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        flush_write_buffers();
        return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}
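
/*
 * Typical streaming pattern (hypothetical driver code): map a kernel
 * buffer for a single transfer to the device, then unmap it once the
 * DMA has completed.
 *
 *        dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *        ... start the transfer and wait for its completion ...
 *        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */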

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* No IOMMU means no merging: each entry maps 1:1 to its page. */
        for (i = 0; i < nents; i++) {
                BUG_ON(!sg[i].page);

                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        flush_write_buffers();
        return nents;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}
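
/*
 * Scatter-gather sketch (hypothetical driver code; program_device() is
 * an invented placeholder): dma_map_sg() returns the number of entries
 * actually mapped -- here always nents, since no merging is done.
 *
 *        int i, n;
 *
 *        n = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *        for (i = 0; i < n; i++)
 *                program_device(sg[i].dma_address, sg[i].length);
 *        dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */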

/*
 * Sync operations: DMA is cache-coherent on i386, so handing a buffer
 * back to the CPU is a no-op; handing it back to the device only
 * requires flushing the CPU write buffers.
 */
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        flush_write_buffers();
}
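
/*
 * Sync sketch (hypothetical driver code): to inspect a long-lived
 * streaming buffer between transfers, bracket the CPU access with the
 * two sync calls instead of unmapping and remapping it.
 *
 *        dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *        ... the CPU inspects the received data ...
 *        dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 */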

/*
 * Mappings cannot fail in this implementation, so there is never an
 * error value to report.
 */
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s, so we
         * can't guarantee allocations that must sit within a tighter
         * range than GFP_DMA provides: on x86 that is the low 16MB,
         * i.e. a 24-bit mask of 0x00ffffff.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;
        return 0;
}
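
/*
 * Probe-time sketch (hypothetical driver code): declare the device's
 * addressing capability before creating any mappings; a nonzero return
 * means the platform cannot satisfy the requested mask.
 *
 *        if (dma_set_mask(dev, 0xffffffffULL))
 *                return -EIO;
 */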

static inline int
dma_get_cache_alignment(void)
{
        /*
         * There is no easy way to get the cache size on all x86
         * variants, so return the maximum possible, to be safe.
         */
        return (1 << L1_CACHE_SHIFT_MAX);
}

#define dma_is_consistent(d) (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);

#endif