123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316 |
- #include <linux/dma-mapping.h>
- #include <linux/dma-debug.h>
- #include <linux/dmar.h>
- #include <linux/bootmem.h>
- #include <linux/pci.h>
- #include <asm/proto.h>
- #include <asm/dma.h>
- #include <asm/iommu.h>
- #include <asm/gart.h>
- #include <asm/calgary.h>
- #include <asm/amd_iommu.h>
/*
 * DAC (double-address-cycle, >32-bit PCI addressing) policy:
 *  -1  forced off by "iommu=nodac" / "iommu=usedac" (see iommu_setup())
 *   0  allowed (default, or "iommu=allowdac")
 *   1  disabled by the VIA bridge quirk at the bottom of this file
 */
static int forbid_dac __read_mostly;

/* Active DMA implementation for the platform; set up during boot. */
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

/* When set, dma_supported() refuses masks >= 40 bits ("iommu=forcesac"). */
static int iommu_sac_force __read_mostly;

/* Debug builds default to the strict/forced settings. */
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* "iommu=pt": identity-map (pass-through) mode requested on the command line. */
int iommu_pass_through;

/* Sentinel returned by mapping failures. */
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = DMA_BIT_MASK(32),
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768
- int dma_set_mask(struct device *dev, u64 mask)
- {
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
- *dev->dma_mask = mask;
- return 0;
- }
- EXPORT_SYMBOL(dma_set_mask);
- #ifdef CONFIG_X86_64
- static __initdata void *dma32_bootmem_ptr;
- static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
- static int __init parse_dma32_size_opt(char *p)
- {
- if (!p)
- return -EINVAL;
- dma32_bootmem_size = memparse(p, &p);
- return 0;
- }
- early_param("dma32_size", parse_dma32_size_opt);
- void __init dma32_reserve_bootmem(void)
- {
- unsigned long size, align;
- if (max_pfn <= MAX_DMA32_PFN)
- return;
- /*
- * check aperture_64.c allocate_aperture() for reason about
- * using 512M as goal
- */
- align = 64ULL<<20;
- size = roundup(dma32_bootmem_size, align);
- dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
- 512ULL<<20);
- if (dma32_bootmem_ptr)
- dma32_bootmem_size = size;
- else
- dma32_bootmem_size = 0;
- }
- static void __init dma32_free_bootmem(void)
- {
- if (max_pfn <= MAX_DMA32_PFN)
- return;
- if (!dma32_bootmem_ptr)
- return;
- free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
- dma32_bootmem_ptr = NULL;
- dma32_bootmem_size = 0;
- }
- #endif
/*
 * Boot-time IOMMU detection pass: give each candidate implementation
 * a chance to probe for its hardware.  Actual initialization happens
 * later in pci_iommu_init().
 */
void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();
#endif

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	/* swiotlb comes last: software bounce-buffer fallback. */
	pci_swiotlb_init();
}
- void *dma_generic_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_addr, gfp_t flag)
- {
- unsigned long dma_mask;
- struct page *page;
- dma_addr_t addr;
- dma_mask = dma_alloc_coherent_mask(dev, flag);
- flag |= __GFP_ZERO;
- again:
- page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
- if (!page)
- return NULL;
- addr = page_to_phys(page);
- if (!is_buffer_dma_capable(dma_mask, addr, size)) {
- __free_pages(page, get_order(size));
- if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
- flag = (flag & ~GFP_DMA32) | GFP_DMA;
- goto again;
- }
- return NULL;
- }
- *dma_addr = addr;
- return page_address(page);
- }
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	/*
	 * Comma-separated option list.  NOTE: options are matched by
	 * prefix with strncmp, so e.g. the "force" test below also
	 * fires for "forcesac" -- the ordering of the tests and the
	 * fall-through to gart_parse_options() are both load-bearing.
	 */
	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}
		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			/* merging only works reliably with a forced IOMMU */
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = -1;
		/* "usedac" stops parsing: any later options are ignored. */
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		/* "pt" also stops parsing immediately. */
		if (!strncmp(p, "pt", 2)) {
			iommu_pass_through = 1;
			return 1;
		}

		/* Let the GART driver interpret its own sub-options too. */
		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		/* Advance to the next comma-separated token. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
/*
 * Can @dev address all of memory described by @mask?  Defers to the
 * device's dma_map_ops when it supplies its own ->dma_supported;
 * otherwise applies the x86 heuristics below (DAC policy, 24-bit
 * ISA floor, forced SAC).  Returns non-zero when the mask is usable.
 */
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	/* Refuse >32-bit (DAC) masks when DAC has been forbidden. */
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
/*
 * Late (fs_initcall) IOMMU initialization: set up DMA-API debugging,
 * then initialize each IOMMU backend in turn, ending with the
 * no-IOMMU fallback.  NOTE(review): presumably each *_init() is a
 * no-op when its hardware was not found during the pci_iommu_alloc()
 * detection pass -- confirm against the individual drivers.
 */
static int __init pci_iommu_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif

	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}
/* Tear down the GART and AMD IOMMUs. */
void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();

	amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	/* Only PCI-to-PCI bridges are affected. */
	if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	/* Respect an explicit nodac/allowdac choice already in effect. */
	if (forbid_dac != 0)
		return;

	dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
	forbid_dac = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif
|