@@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
 	return (pte->val & 3) != 0;
 }
 
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+	return (pte->val & (1 << 7));
+}
+
 static inline int first_pte_in_page(struct dma_pte *pte)
 {
 	return !((unsigned long)pte & ~VTD_PAGE_MASK);
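
A note on the new helper: the open-coded `1 << 7` is the large-page bit of a
VT-d page-table entry, the same bit tested as DMA_PTE_LARGE_PAGE in the
pfn_to_dma_pte() hunk further down. A sketch of the equivalent spelled with
the named constant, assuming that define is in scope at this point in the file:

    static inline bool dma_pte_superpage(struct dma_pte *pte)
    {
            /* DMA_PTE_LARGE_PAGE is bit 7 of pte->val (assumed visible here) */
            return (pte->val & DMA_PTE_LARGE_PAGE) != 0;
    }

Either spelling behaves identically; the `!= 0` merely makes the bool
conversion explicit.
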
@@ -404,6 +409,9 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 
+int intel_iommu_gfx_mapped;
+EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
+
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
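
intel_iommu_gfx_mapped is exported so that a graphics driver can ask whether
its device is actually being translated by an enabled IOMMU; the flag is set
in init_no_remapping_devices() below. A minimal consumer sketch — the config
guard and the wrapper function are illustrative assumptions, not part of this
patch:

    #ifdef CONFIG_INTEL_IOMMU
    extern int intel_iommu_gfx_mapped;
    #endif

    static bool gfx_behind_iommu(void)
    {
    #ifdef CONFIG_INTEL_IOMMU
            /* nonzero once the DMAR code maps the integrated gfx device */
            return intel_iommu_gfx_mapped;
    #else
            return false;
    #endif
    }
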
@@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
 
 static void domain_update_iommu_superpage(struct dmar_domain *domain)
 {
-	int i, mask = 0xf;
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+	int mask = 0xf;
 
 	if (!intel_iommu_superpage) {
 		domain->iommu_superpage = 0;
 		return;
 	}
 
-	domain->iommu_superpage = 4; /* 1TiB */
-
-	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
-		mask |= cap_super_page_val(g_iommus[i]->cap);
+	/* set iommu_superpage to the smallest common denominator */
+	for_each_active_iommu(iommu, drhd) {
+		mask &= cap_super_page_val(iommu->cap);
 		if (!mask) {
 			break;
 		}
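
cap_super_page_val() yields a bitmask of the superpage levels a unit supports
(bit 0 for 2MiB, bit 1 for 1GiB, and so on; the 0xf seed allows up to four
levels), so replacing `|=` with `&=` turns the accumulator into an
intersection: only levels every active IOMMU can handle survive, and a unit
with no superpage support zeroes the mask and ends the walk early. The old
code computed a union, and its unconditional `iommu_superpage = 4` could
advertise 1TiB pages that some unit could not translate. A worked example of
the new intersection, with made-up capability values:

    unsigned int mask = 0xf;

    mask &= 0x3;    /* unit A: 2MiB + 1GiB  -> mask = 0x3 */
    mask &= 0x1;    /* unit B: 2MiB only    -> mask = 0x1 */

    /* the remainder of the function (outside this hunk) turns the
       surviving mask into domain->iommu_superpage */
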
@@ -730,29 +739,23 @@ out:
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-				      unsigned long pfn, int large_level)
+				      unsigned long pfn, int target_level)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct dma_pte *parent, *pte = NULL;
 	int level = agaw_to_level(domain->agaw);
-	int offset, target_level;
+	int offset;
 
 	BUG_ON(!domain->pgd);
 	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
 	parent = domain->pgd;
 
-	/* Search pte */
-	if (!large_level)
-		target_level = 1;
-	else
-		target_level = large_level;
-
 	while (level > 0) {
 		void *tmp_page;
 
 		offset = pfn_level_offset(pfn, level);
 		pte = &parent[offset];
-		if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
 			break;
 		if (level == target_level)
 			break;
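
With the old `target_level = large_level ? large_level : 1` preamble gone, a
target_level of 0 acquires its own meaning: descend until something definitive
appears, stopping at a superpage PTE or at a non-present entry rather than
always walking to level 1. For orientation, each level of the walk resolves
nine bits of the pfn; a sketch of the index computation, reconstructed from
how pfn_level_offset() is used here rather than quoted from the file:

    #define LEVEL_STRIDE    9
    #define LEVEL_MASK      ((1 << LEVEL_STRIDE) - 1)

    /* index of @pfn within the page-table page at @level (1 = 4KiB leaf) */
    static inline int pfn_level_offset(unsigned long pfn, int level)
    {
            return (pfn >> ((level - 1) * LEVEL_STRIDE)) & LEVEL_MASK;
    }
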
@@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
+static int dma_pte_clear_range(struct dmar_domain *domain,
 				unsigned long start_pfn,
 				unsigned long last_pfn)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	unsigned int large_page = 1;
 	struct dma_pte *first_pte, *pte;
+	int order;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 				   (void *)pte - (void *)first_pte);
 
 	} while (start_pfn && start_pfn <= last_pfn);
+
+	order = (large_page - 1) * 9;
+	return order;
 }
 
 /* free page table pages. last level pte should already be cleared */
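
`large_page` ends the loop holding the largest page-table level the clearing
actually touched, and `(large_page - 1) * 9` converts that level into a page
order: level 1 (4KiB PTEs) gives order 0, level 2 gives order 9 (4KiB << 9 =
2MiB), level 3 gives order 18 (1GiB). Returning it lets callers learn how
much address space was really cleared when a superpage was hit.
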
@@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void)
 		}
 	}
 
-	if (dmar_map_gfx)
-		return;
-
 	for_each_drhd_unit(drhd) {
 		int i;
 		if (drhd->ignored || drhd->include_all)
@@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void)
 
 		for (i = 0; i < drhd->devices_cnt; i++)
 			if (drhd->devices[i] &&
-			    !IS_GFX_DEVICE(drhd->devices[i]))
+				!IS_GFX_DEVICE(drhd->devices[i]))
 				break;
 
 		if (i < drhd->devices_cnt)
 			continue;
 
-		/* bypass IOMMU if it is just for gfx devices */
-		drhd->ignored = 1;
-		for (i = 0; i < drhd->devices_cnt; i++) {
-			if (!drhd->devices[i])
-				continue;
-			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+		/* This IOMMU has *only* gfx devices. Either bypass it or
+		   set the gfx_mapped flag, as appropriate */
+		if (dmar_map_gfx) {
+			intel_iommu_gfx_mapped = 1;
+		} else {
+			drhd->ignored = 1;
+			for (i = 0; i < drhd->devices_cnt; i++) {
+				if (!drhd->devices[i])
+					continue;
+				drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+			}
 		}
 	}
 }
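
The two init_no_remapping_devices() hunks change the flow rather than the
policy: the function used to return early whenever dmar_map_gfx was set, so
the gfx-only scan below never ran. Now the scan always runs, and for an IOMMU
whose devices are all graphics devices it either records the fact in
intel_iommu_gfx_mapped (graphics mapping enabled) or bypasses the unit exactly
as before (graphics mapping disabled).
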
@@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 			found = 1;
 	}
 
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
 	if (found == 0) {
 		unsigned long tmp_flags;
 		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
@@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 			spin_unlock_irqrestore(&iommu->lock, tmp_flags);
 		}
 	}
-
-	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
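
These two hunks move the device_domain_lock release up so it is dropped before
domain->iommu_lock is taken, instead of nesting the two. Nesting them here is
harmless only if every other path nests them the same way; otherwise it is the
classic AB-BA shape that lockdep reports:

    /* illustrative deadlock shape, not driver code:
     *
     *   CPU0                          CPU1
     *   lock(device_domain_lock);     lock(domain->iommu_lock);
     *   lock(domain->iommu_lock);     lock(device_domain_lock);
     *        ... each now waits forever for the other ...
     *
     * releasing the first lock before taking the second, as this
     * patch does, breaks the cycle.
     */
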
@@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
 		vm_domain_exit(dmar_domain);
 		return -ENOMEM;
 	}
+	domain_update_iommu_cap(dmar_domain);
 	domain->priv = dmar_domain;
 
 	return 0;
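
The added domain_update_iommu_cap() call initializes the new domain's cached
capability bits — coherency, snooping and, after the change above, the common
superpage level — which otherwise start out unset for these externally managed
(VM) domains.
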
@@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	size_t size = PAGE_SIZE << gfp_order;
+	int order;
 
-	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
 			    (iova + size - 1) >> VTD_PAGE_SHIFT);
 
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return gfp_order;
+	return order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
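
Echoing gfp_order back was only correct while the unmap clipped exactly the
requested range; once superpages exist, clearing one 2MiB PTE wipes 512 4KiB
pages in one go, and the caller needs to know that. A hedged sketch of a
consumer that tears down a range by advancing with the returned order, in the
style of this era's callers of iommu_unmap() (names and loop are illustrative):

    static void unmap_pfn_range(struct iommu_domain *domain,
                                unsigned long pfn, unsigned long end_pfn)
    {
            while (pfn < end_pfn) {
                    /* ask for one page; a superpage hit clears far more */
                    int order = iommu_unmap(domain, pfn << PAGE_SHIFT, 0);

                    pfn += 1UL << order;    /* skip everything cleared */
            }
    }
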
@@ -3950,7 +3961,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
 		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
 		dmar_map_gfx = 0;
-	}
+	} else if (dmar_map_gfx) {
+		/* we have to ensure the gfx device is idle before we flush */
+		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+		intel_iommu_strict = 1;
+	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
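
intel_iommu_strict = 1 makes every unmap flush the IOTLB synchronously instead
of batching flushes through the deferred queue. On these Ironlake IGD parts
the graphics device has to be quiesced before a flush is safe, so while the
IOMMU remains enabled for graphics the quirk gives up batching rather than let
the device keep running against stale translations; when the BIOS allocated no
shadow GTT, the existing branch still disables graphics translation outright.
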