@@ -1381,7 +1381,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
  * Init the device table to not allow DMA access for devices and
  * suppress all page faults
  */
-static void init_device_table(void)
+static void init_device_table_dma(void)
 {
 	u32 devid;
 
@@ -1391,6 +1391,17 @@ static void init_device_table(void)
 	}
 }
 
+static void init_device_table(void)
+{
+	u32 devid;
+
+	if (!amd_iommu_irq_remap)
+		return;
+
+	for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
+		set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
+}
+
 static void iommu_init_flags(struct amd_iommu *iommu)
 {
 	iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
@@ -1781,8 +1792,14 @@ static bool detect_ivrs(void)
 
 static int amd_iommu_init_dma(void)
 {
+	struct amd_iommu *iommu;
 	int ret;
 
+	init_device_table_dma();
+
+	for_each_iommu(iommu)
+		iommu_flush_all_caches(iommu);
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
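
Note: the new init_device_table() depends on set_dev_entry_bit() to flag
every device table entry with DEV_ENTRY_IRQ_TBL_EN. For reference, a
minimal sketch of such a helper is shown below; it assumes the four-u64
per-entry layout of struct dev_table_entry, so treat it as an
illustration rather than a verbatim copy of the kernel source:

/* Sketch: set one bit in a 256-bit device table entry.
 * Assumes each entry is four u64 words (data[0..3]); bit numbers
 * such as DEV_ENTRY_IRQ_TBL_EN index into that 256-bit space.
 */
struct dev_table_entry {
	u64 data[4];
};

extern struct dev_table_entry *amd_iommu_dev_table;

static void set_dev_entry_bit(u16 devid, u8 bit)
{
	int i    = (bit >> 6) & 0x03; /* which u64 word       */
	int _bit = bit & 0x3f;        /* bit within that word */

	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}

With this split, init_device_table() sets only the IRQ-table-enable bit
(and only when interrupt remapping is active), while the DMA-protection
bits are applied later in amd_iommu_init_dma() via init_device_table_dma(),
followed by a full cache flush on each IOMMU so the hardware observes the
updated entries.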