dma-mapping_32.h

#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <asm/bug.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
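
/*
 * x86 DMA is cache-coherent, so the "noncoherent" variants above can
 * simply alias the coherent ones.  dma_alloc_coherent() returns a
 * kernel virtual address and fills in *dma_handle with the matching
 * bus address for the device.
 */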
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);
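
/*
 * Map a single buffer for streaming DMA.  i386 has no IOMMU, so the
 * bus address is simply the physical address of the buffer;
 * flush_write_buffers() only orders outstanding CPU stores before the
 * device may look at the memory.
 *
 * Illustrative driver usage (not part of this file):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... point the device at 'handle', wait for the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */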
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(size == 0);
        flush_write_buffers();
        return virt_to_phys(ptr);
}
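
/*
 * Nothing to undo: the mapping above allocated no resources, so
 * unmapping only sanity-checks the direction.
 */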
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}
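
/*
 * Map a scatterlist: each entry's dma_address becomes the physical
 * address of its page plus offset (sg_phys()).  No entries are
 * merged, so the full 'nents' is returned.
 */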
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sglist[0].length == 0);

        for_each_sg(sglist, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg->dma_address = sg_phys(sg);
        }

        flush_write_buffers();
        return nents;
}
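
/*
 * Page mappings are the same identity transform: the bus address is
 * page_to_phys(page) + offset.  As with dma_unmap_single(), the unmap
 * routines have no state to release.
 */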
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}
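
/*
 * Because x86 caches are coherent with DMA, syncing for the CPU is a
 * no-op.  Syncing for the device only needs flush_write_buffers() to
 * make CPU stores visible before the device reads the buffer.
 */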
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        flush_write_buffers();
}
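
/*
 * The identity mapping can never fail, so there is no error cookie to
 * check for.
 */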
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}
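
/*
 * 'forbid_dac' is set when chipset bugs make double-address-cycle
 * (above-4GB) addressing unsafe; it disallows DMA masks wider than
 * 32 bits.
 */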
extern int forbid_dac;

static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        /* Work around chipset bugs */
        if (forbid_dac > 0 && mask > 0xffffffffULL)
                return 0;

        return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
static inline int
dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
}
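
/*
 * All memory is consistent on this architecture, so dma_cache_sync()
 * degenerates to the same write-buffer flush as the sync_*_for_device
 * helpers above.
 */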
#define dma_is_consistent(d, h) (1)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}
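
/*
 * Optional hook for devices with their own DMA-addressable memory
 * (e.g. on-card RAM): a driver can declare such a region and have
 * dma_alloc_coherent() carve allocations out of it.
 */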
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);

#endif