#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>
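
/*
 * i386 is cache-coherent for DMA, so there is no distinct
 * "noncoherent" allocator: both APIs map to the coherent one.
 */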
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)  dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);
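
/*
 * Illustrative use of the coherent API (a sketch only, not part of
 * this header; "mydev" is a hypothetical struct device pointer):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(mydev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... hand "handle" to the device, access the buffer via "buf" ...
 *	dma_free_coherent(mydev, PAGE_SIZE, buf, handle);
 */

/*
 * Streaming mappings.  With no IOMMU on i386 the bus address of a
 * buffer is simply its physical address, so mapping reduces to
 * virt_to_phys() plus a write-buffer flush, and unmapping to a no-op.
 */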
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();
	WARN_ON(size == 0);
	flush_write_buffers();
	return virt_to_phys(ptr);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();
}
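
/*
 * Illustrative streaming use (a sketch; "mydev", "buf" and "len" are
 * assumed to come from the calling driver):
 *
 *	dma_addr_t handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *	... start the device transfer from "handle" ...
 *	dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);
 */

/*
 * Map a scatterlist by filling in each entry's dma_address from its
 * page and offset; entries are passed through one-to-one, nothing is
 * merged.
 */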
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
		BUG();
	WARN_ON(nents == 0 || sg[0].length == 0);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);

		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}
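
/*
 * Illustrative caller pattern for scatter-gather (a sketch;
 * program_hw_entry() is a made-up driver hook):
 *
 *	int i, count = dma_map_sg(mydev, sglist, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++)
 *		program_hw_entry(i, sglist[i].dma_address, sglist[i].length);
 *	...
 *	dma_unmap_sg(mydev, sglist, nents, DMA_TO_DEVICE);
 */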
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
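
/*
 * Ownership handover between CPU and device.  i386 needs no cache
 * maintenance here, so the for_cpu variants are no-ops and the
 * for_device variants only flush the CPU write buffers.
 */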
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}
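
/*
 * Without an IOMMU a mapping cannot fail, so no dma_addr_t value ever
 * denotes an error.
 */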
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}
static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we
	 * can't guarantee allocations that must be within a tighter
	 * range than GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}
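
/*
 * Illustrative probe-time mask negotiation (a sketch; the 32-bit mask
 * is just the usual choice for PCI devices):
 *
 *	if (dma_set_mask(mydev, 0xffffffffULL))
 *		return -EIO;
 */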
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}
#define dma_is_consistent(d)	(1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}
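
/*
 * Hooks for drivers that carve coherent buffers out of memory the
 * device itself provides (e.g. behind a PCI BAR) rather than out of
 * system RAM.
 */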
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif