pci-dma.c
/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/calgary.h>

int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);
/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

static int iommu_sac_force __read_mostly = 0;

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-to-bug compatible
   with i386. */
struct device fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &fallback_dev.coherent_dma_mask,
};

/* Allocate DMA memory on the node nearest to the device */
static noinline void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
        struct page *page;
        int node;

#ifdef CONFIG_PCI
        if (dev->bus == &pci_bus_type)
                node = pcibus_to_node(to_pci_dev(dev)->bus);
        else
#endif
                node = numa_node_id();

        if (node < first_node(node_online_map))
                node = first_node(node_online_map);

        page = alloc_pages_node(node, gfp, order);
        return page ? page_address(page) : NULL;
}
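
/*
 * Note: the first_node() clamp above guards against pcibus_to_node()
 * returning -1 (or an otherwise offline node) for buses without node
 * affinity; allocation then falls back to the first online node.
 */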
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        void *memory;
        unsigned long dma_mask = 0;
        u64 bus;

        if (!dev)
                dev = &fallback_dev;
        dma_mask = dev->coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = DMA_32BIT_MASK;

        /* Device is not DMA-capable */
        if (dev->dma_mask == NULL)
                return NULL;

        /* Don't invoke the OOM killer */
        gfp |= __GFP_NORETRY;

        /* Kludge to make it bug-to-bug compatible with i386. i386
           uses the normal dma_mask for alloc_coherent. */
        dma_mask &= *dev->dma_mask;

        /* Why <=? Even when the mask is smaller than 4GB it is often
           larger than 16MB and in this case we have a chance of
           finding fitting memory in the next higher zone first. If
           not retry with true GFP_DMA. -AK */
        if (dma_mask <= DMA_32BIT_MASK)
                gfp |= GFP_DMA32;
again:
        memory = dma_alloc_pages(dev, gfp, get_order(size));
        if (memory == NULL)
                return NULL;

        {
                int high, mmu;

                bus = virt_to_bus(memory);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                else if (high) {
                        free_pages((unsigned long)memory,
                                   get_order(size));

                        /* Don't use the 16MB ZONE_DMA unless absolutely
                           needed. It's better to use remapping first. */
                        if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                                goto again;
                        }

                        /* Let the low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);

                        if (dma_ops->alloc_coherent)
                                return dma_ops->alloc_coherent(dev, size,
                                                               dma_handle, gfp);
                        return NULL;
                }

                memset(memory, 0, size);
                if (!mmu) {
                        *dma_handle = virt_to_bus(memory);
                        return memory;
                }
        }

        if (dma_ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
                return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
        }

        if (dma_ops->map_simple) {
                *dma_handle = dma_ops->map_simple(dev, memory,
                                                  size,
                                                  PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
                        return memory;
        }

        if (panic_on_overflow)
                panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
        free_pages((unsigned long)memory, get_order(size));
        return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
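
/*
 * Illustrative sketch (not part of this file): a driver would typically
 * pair dma_alloc_coherent() with dma_free_coherent(), e.g. for a
 * hypothetical descriptor ring:
 *
 *      dma_addr_t ring_bus;
 *      void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *                                      &ring_bus, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_bus);
 *
 * RING_BYTES and pdev are assumed names used for illustration only.
 */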

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t bus)
{
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);

static int forbid_dac __read_mostly;

int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
                       dev->bus_id);
                return 0;
        }
#endif

        if (dma_ops->dma_supported)
                return dma_ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on. This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these. Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
                       dev->bus_id, mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
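
/*
 * Illustrative sketch only: a PCI driver's probe routine would normally
 * negotiate its mask before mapping anything, falling back to 32 bits
 * when the full 64-bit mask is rejected, e.g.:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
 *          dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *              return -EIO;
 *
 * pdev here is an assumed struct pci_dev argument of the probe function.
 */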

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
__init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }
                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = -1;
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
#ifdef CONFIG_IOMMU
                gart_parse_options(p);
#endif
#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
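
/*
 * Illustrative examples of the boot parameter handled above (all values
 * taken from the option strings parsed in iommu_setup()):
 *
 *      iommu=off               disable the IOMMU entirely
 *      iommu=force,merge       force IOMMU use and enable merging
 *      iommu=soft              use the software bounce buffer (swiotlb)
 *      iommu=nodac             forbid DAC (>4GB) addressing
 */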

void __init pci_iommu_alloc(void)
{
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons.
         */
#ifdef CONFIG_IOMMU
        iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
        detect_calgary();
#endif

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}

static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
        calgary_iommu_init();
#endif

#ifdef CONFIG_IOMMU
        gart_iommu_init();
#endif

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */
static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif

/* Must execute after the PCI subsystem */
fs_initcall(pci_iommu_init);