@@ -57,17 +57,9 @@
  * physically contiguous memory regions it is mapping into page sizes
  * that we support.
  *
- * Traditionally the IOMMU core just handed us the mappings directly,
- * after making sure the size is an order of a 4KiB page and that the
- * mapping has natural alignment.
- *
- * To retain this behavior, we currently advertise that we support
- * all page sizes that are an order of 4KiB.
- *
- * If at some point we'd like to utilize the IOMMU core's new behavior,
- * we could change this to advertise the real page sizes we support.
+ * 512GB pages are not supported due to a hardware bug.
  */
-#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
+#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
 
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
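The replacement mask is worth unpacking: ~0xFFFUL keeps every bit from 12 upward, i.e. advertises every power-of-two page size of at least 4KiB, while ~(2ULL << 38) clears bit 39, since 2ULL << 38 == 1ULL << 39 == 512GiB. A minimal user-space sketch (not kernel code; ULL constants so the math is 64-bit even on 32-bit hosts) confirming which page-size bits survive:

#include <stdio.h>

int main(void)
{
	/* Mirrors the new AMD_IOMMU_PGSIZES: bit k set => 2^k bytes supported. */
	unsigned long long pgsizes = (~0xFFFULL) & ~(2ULL << 38);

	printf("4KiB   (bit 12): %d\n", !!(pgsizes & (1ULL << 12)));
	printf("2MiB   (bit 21): %d\n", !!(pgsizes & (1ULL << 21)));
	printf("1GiB   (bit 30): %d\n", !!(pgsizes & (1ULL << 30)));
	printf("512GiB (bit 39): %d\n", !!(pgsizes & (1ULL << 39)));	/* 0: masked off */
	return 0;
}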
@@ -140,6 +132,9 @@ static void free_dev_data(struct iommu_dev_data *dev_data)
 	list_del(&dev_data->dev_data_list);
 	spin_unlock_irqrestore(&dev_data_list_lock, flags);
 
+	if (dev_data->group)
+		iommu_group_put(dev_data->group);
+
 	kfree(dev_data);
 }
 
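This put pairs with use_dev_data_iommu_group() further down: iommu_group_alloc() returns a group with a reference held, and that reference is stashed in dev_data->group instead of being dropped, so free_dev_data() is where it must finally be released. A toy user-space model of that ownership convention (simplified structs, not the real iommu_group API):

#include <assert.h>
#include <stdlib.h>

struct toy_group { int refcount; };

/* Stand-in for iommu_group_alloc(): returns with one reference held. */
static struct toy_group *toy_group_alloc(void)
{
	struct toy_group *g = malloc(sizeof(*g));
	g->refcount = 1;
	return g;
}

/* Stand-in for iommu_group_put(): drops one reference, frees at zero. */
static void toy_group_put(struct toy_group *g)
{
	if (--g->refcount == 0)
		free(g);
}

int main(void)
{
	/* use_dev_data_iommu_group(): the alloc reference is kept (dev_data->group). */
	struct toy_group *stashed = toy_group_alloc();

	assert(stashed->refcount == 1);

	/* free_dev_data(): the stashed reference is dropped last. */
	toy_group_put(stashed);
	return 0;
}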
@@ -274,41 +269,23 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 	*from = to;
 }
 
-#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
-
-static int iommu_init_device(struct device *dev)
+static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
 {
-	struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
-	struct iommu_dev_data *dev_data;
-	struct iommu_group *group;
-	u16 alias;
-	int ret;
-
-	if (dev->archdata.iommu)
-		return 0;
-
-	dev_data = find_dev_data(get_device_id(dev));
-	if (!dev_data)
-		return -ENOMEM;
-
-	alias = amd_iommu_alias_table[dev_data->devid];
-	if (alias != dev_data->devid) {
-		struct iommu_dev_data *alias_data;
+	while (!bus->self) {
+		if (!pci_is_root_bus(bus))
+			bus = bus->parent;
+		else
+			return ERR_PTR(-ENODEV);
+	}
 
-		alias_data = find_dev_data(alias);
-		if (alias_data == NULL) {
-			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
-				dev_name(dev));
-			free_dev_data(dev_data);
-			return -ENOTSUPP;
-		}
-		dev_data->alias_data = alias_data;
+	return bus;
+}
 
-		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
-	}
+#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 
-	if (dma_pdev == NULL)
-		dma_pdev = pci_dev_get(pdev);
+static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
+{
+	struct pci_dev *dma_pdev = pdev;
 
 	/* Account for quirked devices */
 	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
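find_hosted_bus() factors out the upward walk that the next hunk removes from the caller: starting from a possibly virtual bus (one with no hosting bridge, bus->self == NULL), it climbs parent links until it reaches a bus hosted by a real bridge, or fails with ERR_PTR(-ENODEV) at the root. A self-contained user-space analogue (toy structs; pci_bus reduced to the two fields the walk touches, bridge name hypothetical):

#include <stdio.h>
#include <stddef.h>

struct toy_bus {
	struct toy_bus *parent;	/* NULL for a root bus */
	const char *self;	/* hosting bridge, NULL for virtual buses */
};

/* Same shape as find_hosted_bus(), with NULL standing in for ERR_PTR(). */
static struct toy_bus *toy_find_hosted_bus(struct toy_bus *bus)
{
	while (!bus->self) {
		if (bus->parent)		/* !pci_is_root_bus(bus) */
			bus = bus->parent;
		else
			return NULL;		/* ERR_PTR(-ENODEV) in the kernel */
	}
	return bus;
}

int main(void)
{
	struct toy_bus root   = { .parent = NULL,    .self = NULL };
	struct toy_bus hosted = { .parent = &root,   .self = "bridge 00:1c.0" };
	struct toy_bus virt   = { .parent = &hosted, .self = NULL };

	struct toy_bus *found = toy_find_hosted_bus(&virt);
	printf("hosted at: %s\n", found ? found->self : "(none)");
	return 0;
}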
@@ -330,14 +307,9 @@ static int iommu_init_device(struct device *dev)
 	 * Finding the next device may require skipping virtual buses.
 	 */
 	while (!pci_is_root_bus(dma_pdev->bus)) {
-		struct pci_bus *bus = dma_pdev->bus;
-
-		while (!bus->self) {
-			if (!pci_is_root_bus(bus))
-				bus = bus->parent;
-			else
-				goto root_bus;
-		}
+		struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
+		if (IS_ERR(bus))
+			break;
 
 		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
 			break;
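With the helper in place, the open-coded inner loop and its goto root_bus collapse into a call plus an IS_ERR() check; an -ENODEV return now simply ends the climb. For reference, a minimal user-space rendition of the ERR_PTR()/IS_ERR() idiom from include/linux/err.h, which encodes small negative errno values in the top of the pointer range so one return value can carry either a pointer or an error:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *bus = ERR_PTR(-ENODEV);	/* what find_hosted_bus() returns on failure */

	printf("IS_ERR(bus) = %ld\n", IS_ERR(bus));	/* prints 1 */
	return 0;
}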
@@ -345,19 +317,137 @@ static int iommu_init_device(struct device *dev)
 		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
 	}
 
-root_bus:
-	group = iommu_group_get(&dma_pdev->dev);
-	pci_dev_put(dma_pdev);
+	return dma_pdev;
+}
+
+static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
+{
+	struct iommu_group *group = iommu_group_get(&pdev->dev);
+	int ret;
+
 	if (!group) {
 		group = iommu_group_alloc();
 		if (IS_ERR(group))
 			return PTR_ERR(group);
+
+		WARN_ON(&pdev->dev != dev);
 	}
 
 	ret = iommu_group_add_device(group, dev);
-
 	iommu_group_put(group);
+	return ret;
+}
+
+static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
+				    struct device *dev)
+{
+	if (!dev_data->group) {
+		struct iommu_group *group = iommu_group_alloc();
+		if (IS_ERR(group))
+			return PTR_ERR(group);
+
+		dev_data->group = group;
+	}
+
+	return iommu_group_add_device(dev_data->group, dev);
+}
+
+static int init_iommu_group(struct device *dev)
+{
+	struct iommu_dev_data *dev_data;
+	struct iommu_group *group;
+	struct pci_dev *dma_pdev;
+	int ret;
+
+	group = iommu_group_get(dev);
+	if (group) {
+		iommu_group_put(group);
+		return 0;
+	}
+
+	dev_data = find_dev_data(get_device_id(dev));
+	if (!dev_data)
+		return -ENOMEM;
+
+	if (dev_data->alias_data) {
+		u16 alias;
+		struct pci_bus *bus;
+
+		if (dev_data->alias_data->group)
+			goto use_group;
+
+		/*
+		 * If the alias device exists, it's effectively just a first
+		 * level quirk for finding the DMA source.
+		 */
+		alias = amd_iommu_alias_table[dev_data->devid];
+		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+		if (dma_pdev) {
+			dma_pdev = get_isolation_root(dma_pdev);
+			goto use_pdev;
+		}
+
+		/*
+		 * If the alias is virtual, try to find a parent device
+		 * and test whether the IOMMU group is actually rooted above
+		 * the alias. Be careful to also test the parent device if
+		 * we think the alias is the root of the group.
+		 */
+		bus = pci_find_bus(0, alias >> 8);
+		if (!bus)
+			goto use_group;
+
+		bus = find_hosted_bus(bus);
+		if (IS_ERR(bus) || !bus->self)
+			goto use_group;
+
+		dma_pdev = get_isolation_root(pci_dev_get(bus->self));
+		if (dma_pdev != bus->self || (dma_pdev->multifunction &&
+		    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
+			goto use_pdev;
+
+		pci_dev_put(dma_pdev);
+		goto use_group;
+	}
+
+	dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
+use_pdev:
+	ret = use_pdev_iommu_group(dma_pdev, dev);
+	pci_dev_put(dma_pdev);
+	return ret;
+use_group:
+	return use_dev_data_iommu_group(dev_data->alias_data, dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
+	u16 alias;
+	int ret;
+
+	if (dev->archdata.iommu)
+		return 0;
+
+	dev_data = find_dev_data(get_device_id(dev));
+	if (!dev_data)
+		return -ENOMEM;
+
+	alias = amd_iommu_alias_table[dev_data->devid];
+	if (alias != dev_data->devid) {
+		struct iommu_dev_data *alias_data;
+
+		alias_data = find_dev_data(alias);
+		if (alias_data == NULL) {
+			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+				dev_name(dev));
+			free_dev_data(dev_data);
+			return -ENOTSUPP;
+		}
+		dev_data->alias_data = alias_data;
+	}
 
+	ret = init_iommu_group(dev);
 	if (ret)
 		return ret;
 
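Both alias lookups above split the 16-bit requester ID the same way: the upper byte is the bus number passed to pci_find_bus()/pci_get_bus_and_slot(), the lower byte is the devfn. A small sketch of that decode (the alias value itself is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned short alias = 0x0321;		/* hypothetical: bus 0x03, devfn 0x21 */

	unsigned char bus   = alias >> 8;	/* as in pci_find_bus(0, alias >> 8) */
	unsigned char devfn = alias & 0xff;	/* as in pci_get_bus_and_slot(..., alias & 0xff) */

	printf("bus %02x device %02x function %x\n",
	       bus, devfn >> 3, devfn & 0x7);	/* bus 03 device 04 function 1 */
	return 0;
}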