Browse Source

x86/amd-iommu: Remove amd_iommu_pd_table

The data that was stored in this table is now available in
dev->archdata.iommu, so this table is no longer necessary.
This patch removes the remaining uses of that variable and
removes it from the code.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Joerg Roedel 15 years ago
parent
commit
492667dacc

+ 0 - 3
arch/x86/include/asm/amd_iommu_types.h

@@ -457,9 +457,6 @@ extern unsigned amd_iommu_aperture_order;
 /* largest PCI device id we expect translation requests for */
 /* largest PCI device id we expect translation requests for */
 extern u16 amd_iommu_last_bdf;
 extern u16 amd_iommu_last_bdf;
 
 
-/* data structures for protection domain handling */
-extern struct protection_domain **amd_iommu_pd_table;
-
 /* allocation bitmap for domain ids */
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
 

+ 11 - 24
arch/x86/kernel/amd_iommu.c

@@ -1309,8 +1309,6 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain)
 {
 {
 	u64 pte_root = virt_to_phys(domain->pt_root);
 	u64 pte_root = virt_to_phys(domain->pt_root);
 
 
-	BUG_ON(amd_iommu_pd_table[devid] != NULL);
-
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
 		    << DEV_ENTRY_MODE_SHIFT;
 	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
@@ -1318,20 +1316,10 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain)
 	amd_iommu_dev_table[devid].data[2] = domain->id;
 	amd_iommu_dev_table[devid].data[2] = domain->id;
 	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
 	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
 	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
 	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
-
-	amd_iommu_pd_table[devid] = domain;
-
 }
 }
 
 
 static void clear_dte_entry(u16 devid)
 static void clear_dte_entry(u16 devid)
 {
 {
-	struct protection_domain *domain = amd_iommu_pd_table[devid];
-
-	BUG_ON(domain == NULL);
-
-	/* remove domain from the lookup table */
-	amd_iommu_pd_table[devid] = NULL;
-
 	/* remove entry from the device table seen by the hardware */
 	/* remove entry from the device table seen by the hardware */
 	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
 	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
 	amd_iommu_dev_table[devid].data[1] = 0;
 	amd_iommu_dev_table[devid].data[1] = 0;
@@ -1641,15 +1629,11 @@ static struct protection_domain *get_domain(struct device *dev)
 
 
 static void update_device_table(struct protection_domain *domain)
 static void update_device_table(struct protection_domain *domain)
 {
 {
-	unsigned long flags;
-	int i;
+	struct iommu_dev_data *dev_data;
 
 
-	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
-		if (amd_iommu_pd_table[i] != domain)
-			continue;
-		write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-		set_dte_entry(i, domain);
-		write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	list_for_each_entry(dev_data, &domain->dev_list, list) {
+		u16 devid = get_device_id(dev_data->dev);
+		set_dte_entry(devid, domain);
 	}
 	}
 }
 }
 
 
@@ -2259,14 +2243,17 @@ free_domains:
 
 
 static void cleanup_domain(struct protection_domain *domain)
 static void cleanup_domain(struct protection_domain *domain)
 {
 {
+	struct iommu_dev_data *dev_data, *next;
 	unsigned long flags;
 	unsigned long flags;
-	u16 devid;
 
 
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
 
 
-	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
-		if (amd_iommu_pd_table[devid] == domain)
-			clear_dte_entry(devid);
+	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
+		struct device *dev = dev_data->dev;
+
+		do_detach(dev);
+		atomic_set(&dev_data->bind, 0);
+	}
 
 
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 }

+ 0 - 18
arch/x86/kernel/amd_iommu_init.c

@@ -164,12 +164,6 @@ u16 *amd_iommu_alias_table;
  */
  */
 struct amd_iommu **amd_iommu_rlookup_table;
 struct amd_iommu **amd_iommu_rlookup_table;
 
 
-/*
- * The pd table (protection domain table) is used to find the protection domain
- * data structure a device belongs to. Indexed with the PCI device id too.
- */
-struct protection_domain **amd_iommu_pd_table;
-
 /*
 /*
  * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
  * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
  * to know which ones are already in use.
  * to know which ones are already in use.
@@ -1238,15 +1232,6 @@ static int __init amd_iommu_init(void)
 	if (amd_iommu_rlookup_table == NULL)
 	if (amd_iommu_rlookup_table == NULL)
 		goto free;
 		goto free;
 
 
-	/*
-	 * Protection Domain table - maps devices to protection domains
-	 * This table has the same size as the rlookup_table
-	 */
-	amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-				     get_order(rlookup_table_size));
-	if (amd_iommu_pd_table == NULL)
-		goto free;
-
 	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
 	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
 					    GFP_KERNEL | __GFP_ZERO,
 					    GFP_KERNEL | __GFP_ZERO,
 					    get_order(MAX_DOMAIN_ID/8));
 					    get_order(MAX_DOMAIN_ID/8));
@@ -1314,9 +1299,6 @@ free:
 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
 	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
 		   get_order(MAX_DOMAIN_ID/8));
 		   get_order(MAX_DOMAIN_ID/8));
 
 
-	free_pages((unsigned long)amd_iommu_pd_table,
-		   get_order(rlookup_table_size));
-
 	free_pages((unsigned long)amd_iommu_rlookup_table,
 	free_pages((unsigned long)amd_iommu_rlookup_table,
 		   get_order(rlookup_table_size));
 		   get_order(rlookup_table_size));