浏览代码

x86/amd-iommu: Make np-cache a global flag

The non-present cache flag was IOMMU local until now which
doesn't make sense. Make this a global flag so we can remove
the last user of 'struct iommu' in the map/unmap path.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Joerg Roedel 15 年之前
父节点
当前提交
318afd41d2
共有 3 个文件被更改,包括 10 次插入7 次删除
  1. 3 0
      arch/x86/include/asm/amd_iommu_types.h
  2. 1 7
      arch/x86/kernel/amd_iommu.c
  3. 6 0
      arch/x86/kernel/amd_iommu_init.c

+ 3 - 0
arch/x86/include/asm/amd_iommu_types.h

@@ -211,6 +211,9 @@ extern bool amd_iommu_dump;
 			printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
 	} while(0);
 
+/* global flag if IOMMUs cache non-present entries */
+extern bool amd_iommu_np_cache;
+
 /*
  * Make iterating over all IOMMUs easier
  */

+ 1 - 7
arch/x86/kernel/amd_iommu.c

@@ -131,12 +131,6 @@ static void amd_iommu_stats_init(void)
 
 #endif
 
-/* returns !0 if the IOMMU is caching non-present entries in its TLB */
-static int iommu_has_npcache(struct amd_iommu *iommu)
-{
-	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
-}
-
 /****************************************************************************
  *
  * Interrupt handling functions
@@ -1713,7 +1707,7 @@ retry:
 	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
 		iommu_flush_tlb(&dma_dom->domain);
 		dma_dom->need_flush = false;
-	} else if (unlikely(iommu_has_npcache(iommu)))
+	} else if (unlikely(amd_iommu_np_cache))
 		iommu_flush_pages(&dma_dom->domain, address, size);
 
 out:

+ 6 - 0
arch/x86/kernel/amd_iommu_init.c

@@ -141,6 +141,9 @@ LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
 struct amd_iommu *amd_iommus[MAX_IOMMUS];
 int amd_iommus_present;
 
+/* IOMMUs have a non-present cache? */
+bool amd_iommu_np_cache __read_mostly;
+
 /*
  * List of protection domains - used during resume
  */
@@ -891,6 +894,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);
 
+	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+		amd_iommu_np_cache = true;
+
 	return pci_enable_device(iommu->dev);
 }