dma-mapping.h

#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	WARN_ON(size == 0);
	flush_write_buffers();
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
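/*
 * Usage sketch (illustrative; "dev", "buf" and "len" below are hypothetical):
 * a streaming mapping is set up for a single transfer and torn down once the
 * transfer completes, e.g.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... let the device transfer "len" bytes from "handle" ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * On i386 the mapping is simply the buffer's physical address and the unmap
 * does nothing, since the platform is cache-coherent with respect to DMA.
 */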
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);
	WARN_ON(nents == 0 || sg[0].length == 0);

	for (i = 0; i < nents; i++) {
		BUG_ON(!sg[i].page);

		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	flush_write_buffers();
	return nents;
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}
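/*
 * Usage sketch (illustrative; "dev", "sglist" and "count" are hypothetical):
 * a scatter-gather mapping covers several discontiguous buffers in one call:
 *
 *	int mapped = dma_map_sg(dev, sglist, count, DMA_FROM_DEVICE);
 *	... program the device with sglist[i].dma_address and sglist[i].length ...
 *	dma_unmap_sg(dev, sglist, mapped, DMA_FROM_DEVICE);
 *
 * On i386 each entry maps 1:1 to its physical address, so the returned count
 * always equals the number of entries passed in.
 */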
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	flush_write_buffers();
}
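/*
 * Usage sketch (illustrative; "dev", "handle" and "len" are hypothetical):
 * if the CPU needs to touch a buffer while its streaming mapping is still
 * live, the access is bracketed with sync calls:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads or updates the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 * On i386 only the *_for_device variants do any work (a write-buffer flush);
 * the *_for_cpu variants are empty because DMA is cache-coherent here.
 */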
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

static inline int
dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

#define dma_is_consistent(d)	(1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);

#endif
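Taken together, the interfaces above cover both coherent allocations and streaming mappings. The fragment below is a minimal sketch of how a driver might combine them; it is illustrative only and not part of the header: the device pointer, buffer and lengths are hypothetical, error handling is minimal, and it assumes the usual kernel environment (<linux/errno.h> for the error codes, PAGE_SIZE and GFP_KERNEL via the includes already pulled in above).

/* Illustrative driver fragment, not part of the header above. */
static int example_dma_setup(struct device *dev, void *buf, size_t len)
{
	dma_addr_t ring_handle, buf_handle;
	void *ring;

	/* Coherent memory: visible to CPU and device without sync calls,
	 * typically used for descriptor rings. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_handle, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* Streaming mapping of an existing kernel buffer for one transfer. */
	buf_handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(buf_handle)) {
		dma_free_coherent(dev, PAGE_SIZE, ring, ring_handle);
		return -EIO;
	}

	/* ... hand ring_handle and buf_handle to the device and wait for
	 * the transfer to complete ... */

	dma_unmap_single(dev, buf_handle, len, DMA_TO_DEVICE);
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_handle);
	return 0;
}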