/* pci-dma.c */

#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This tells the BIO block layer to assume merging. Default to off
 * because we cannot guarantee merging later.
 */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/*
 * Dummy device used for NULL arguments (normally ISA). A smaller DMA
 * mask would probably be better, but this is kept bug-to-bug compatible
 * with older i386.
 */
struct device fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &fallback_dev.coherent_dma_mask,
};

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
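
/*
 * Example (illustrative, not part of this file): a typical PCI driver
 * negotiates its mask at probe time, falling back to 32-bit addressing
 * when the full 64-bit mask is refused; "pdev" here is the driver's
 * struct pci_dev.
 *
 *      if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
 *          dma_set_mask(&pdev->dev, DMA_32BIT_MASK)) {
 *              dev_err(&pdev->dev, "no usable DMA configuration\n");
 *              return -EIO;
 *      }
 */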

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
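
/*
 * Example (illustrative): the reservation size can be overridden on
 * the kernel command line, e.g.
 *
 *      dma32_size=64M
 *
 * memparse() understands the usual K/M/G suffixes.
 */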

void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;

        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * See allocate_aperture() in aperture_64.c for why 512M is
         * used as the allocation goal.
         */
        align = 64ULL<<20;
        size = round_up(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                512ULL<<20);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}

static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}

void __init pci_iommu_alloc(void)
{
        /* Free the range so the IOMMU can allocate a range below 4G */
        dma32_free_bootmem();

        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

        pci_swiotlb_init();
}

unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
{
        unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);

        return size >> PAGE_SHIFT;
}
EXPORT_SYMBOL(iommu_num_pages);
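
/*
 * Worked example (illustrative): with 4K pages, a 0x20-byte buffer at
 * addr 0x1ff0 straddles a page boundary:
 *
 *      (0x1ff0 & ~PAGE_MASK) + 0x20 = 0xff0 + 0x20 = 0x1010
 *      roundup(0x1010, PAGE_SIZE)   = 0x2000  =>  2 pages
 *
 * whereas the naive len >> PAGE_SHIFT would yield 0.
 */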
#endif /* CONFIG_X86_64 */

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = -1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
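
/*
 * Example (illustrative): options are comma-separated and may be
 * combined on the kernel command line, e.g.
 *
 *      iommu=off
 *      iommu=noforce,nopanic
 *      iommu=soft              (route DMA through swiotlb; needs
 *                               CONFIG_SWIOTLB)
 *
 * See Documentation/x86_64/boot-options.txt for the full list.
 */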

int dma_supported(struct device *dev, u64 mask)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /*
         * Copied from i386. Doesn't make much sense, because it will
         * only work for pci_alloc_coherent.
         * The caller just has to use GFP_DMA in this case.
         */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /*
         * Tell the device to use SAC when IOMMU force is on. This
         * allows the driver to use cheaper accesses in some cases.
         *
         * The problem with this is that if we overflow the IOMMU area
         * and return DAC as fallback address the device may not handle
         * it correctly.
         *
         * As a special case some controllers have a 39-bit address
         * mode that is as efficient as 32-bit (aic79xx). Don't force
         * SAC for these. Assume all masks <= 40 bits are of this
         * type. Normally this doesn't make any difference, but gives
         * more gentle handling of IOMMU overflow.
         */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);
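
/*
 * Example (illustrative): booting with "iommu=forcesac" sets
 * iommu_sac_force, so a request for DMA_64BIT_MASK is refused above;
 * dma_set_mask() then returns -EIO and a well-behaved driver retries
 * with DMA_32BIT_MASK, keeping bus addresses in single-address-cycle
 * (SAC) range.
 */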

/* Allocate DMA memory on node near device */
static noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
        int node;

        node = dev_to_node(dev);

        return alloc_pages_node(node, gfp, order);
}

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory = NULL;
        struct page *page;
        unsigned long dma_mask = 0;
        dma_addr_t bus;
        int noretry = 0;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &fallback_dev;
                gfp |= GFP_DMA;
        }
        dma_mask = dev->coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        /* Device not DMA able */
        if (dev->dma_mask == NULL)
                return NULL;

        /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
        if (gfp & __GFP_DMA)
                noretry = 1;

#ifdef CONFIG_X86_64
        /*
         * Why <=? Even when the mask is smaller than 4GB it is often
         * larger than 16MB and in this case we have a chance of
         * finding fitting memory in the next higher zone first. If
         * not retry with true GFP_DMA. -AK
         */
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                gfp |= GFP_DMA32;
                if (dma_mask < DMA_32BIT_MASK)
                        noretry = 1;
        }
#endif

 again:
        page = dma_alloc_pages(dev,
                noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
        if (page == NULL)
                return NULL;

        {
                int high, mmu;
                bus = page_to_phys(page);
                memory = page_address(page);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                else if (high) {
                        free_pages((unsigned long)memory,
                                   get_order(size));

                        /*
                         * Don't use the 16MB ZONE_DMA unless absolutely
                         * needed. It's better to use remapping first.
                         */
                        if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                                goto again;
                        }

                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);

                        if (ops->alloc_coherent)
                                return ops->alloc_coherent(dev, size,
                                                           dma_handle, gfp);
                        return NULL;
                }

                memset(memory, 0, size);
                if (!mmu) {
                        *dma_handle = bus;
                        return memory;
                }
        }

        if (ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
                return ops->alloc_coherent(dev, size, dma_handle, gfp);
        }

        if (ops->map_simple) {
                *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
                                              size,
                                              PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
                        return memory;
        }

        if (panic_on_overflow)
                panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
                      (unsigned long)size);
        free_pages((unsigned long)memory, get_order(size));
        return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t bus)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        int order = get_order(size);
        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_release_from_coherent(dev, order, vaddr))
                return;
        if (ops->unmap_single)
                ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
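
/*
 * Example (illustrative sketch): a driver allocating and releasing a
 * coherent descriptor ring; "ring" and "ring_dma" are hypothetical
 * names.
 *
 *      void *ring;
 *      dma_addr_t ring_dma;
 *
 *      ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
 *                                GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ... hand ring_dma to the device, access "ring" from the CPU ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */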

static int __init pci_iommu_init(void)
{
        calgary_iommu_init();

        intel_iommu_init();

        amd_iommu_init();

        gart_iommu_init();

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. "
                                 "Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif