pci-dma_64.c

/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <asm/proto.h>
#include <asm/io.h>
#include <asm/gart.h>
#include <asm/calgary.h>

int iommu_merge __read_mostly = 0;

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

extern int iommu_sac_force;

int no_iommu __read_mostly;

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};
/* Allocate DMA memory on node near device */
noinline static void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	struct page *page;
	int node;

	node = dev_to_node(dev);

	page = alloc_pages_node(node, gfp, order);
	return page ? page_address(page) : NULL;
}
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	unsigned long dma_mask = 0;
	u64 bus;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_coherent. */
	dma_mask &= *dev->dma_mask;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK)
		gfp |= GFP_DMA32;
again:
	memory = dma_alloc_pages(dev, gfp, get_order(size));
	if (memory == NULL)
		return NULL;

	{
		int high, mmu;

		bus = virt_to_bus(memory);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
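/*
 * Usage sketch (illustrative only, not part of this file): a driver would
 * typically pair dma_alloc_coherent() and dma_free_coherent() around the
 * lifetime of a buffer shared with its device.  The device pointer, buffer
 * size and variable names below are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, 4096, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, 4096, ring, ring_dma);
 */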
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;

#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
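/*
 * Example kernel command lines handled by the parser above (illustrative;
 * see the boot-options.txt document referenced above for the authoritative
 * list):
 *
 *	iommu=off
 *	iommu=noforce,nomerge,nopanic
 *	iommu=soft		(only when CONFIG_SWIOTLB is enabled)
 */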