|
@@ -4000,4 +4000,130 @@ static void free_irte(u16 devid, int index)
|
|
|
iommu_completion_wait(iommu);
|
|
|
}
|
|
|
|
|
|
/*
 * setup_ioapic_entry - program an IO-APIC routing entry for a remapped IRQ
 * @irq:         Linux interrupt number being set up
 * @entry:       IO-APIC RTE to fill in (zeroed here before use)
 * @destination: APIC destination written into the IRTE
 * @vector:      real CPU vector written into the IRTE
 * @attr:        IO-APIC id/pin/trigger/polarity attributes for this pin
 *
 * Resolves the IOMMU device id for the IO-APIC, makes sure the device's
 * interrupt remapping table exists, writes an IRTE carrying the real
 * vector/destination, and records the (devid, index) pair in the irq's
 * irq_2_iommu bookkeeping so later affinity changes and teardown can
 * find the IRTE again.
 *
 * Returns 0 on success, or a negative errno (-EINVAL if no chip data,
 * a negative devid lookup error, -ENOMEM if no remap table, or the
 * error from modify_irte()).
 */
static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
			      unsigned int destination, int vector,
			      struct io_apic_irq_attr *attr)
{
	struct irq_remap_table *table;
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;
	union irte irte;
	int ioapic_id;
	int index;
	int devid;
	int ret;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_iommu;
	ioapic_id = mpc_ioapic_id(attr->ioapic);
	devid     = get_ioapic_devid(ioapic_id);

	/* A negative value is the lookup's error code - propagate it. */
	if (devid < 0)
		return devid;

	/* 'true' requests allocation of the table if it does not exist yet. */
	table = get_irq_table(devid, true);
	if (table == NULL)
		return -ENOMEM;

	/* IO-APIC pins map 1:1 onto remap-table slots: pin number == index. */
	index = attr->ioapic_pin;

	/* Setup IRQ remapping info */
	irte_info->sub_handle = devid;
	irte_info->irte_index = index;
	/*
	 * NOTE(review): the iommu field is repurposed to stash the irq_cfg
	 * pointer (hence the cast) - presumably so later users can recover
	 * cfg from irte_info; confirm against the consumers of this field.
	 */
	irte_info->iommu      = (void *)cfg;

	/* Setup IRTE for IOMMU: the real vector/destination live here. */
	irte.val                = 0;
	irte.fields.vector      = vector;
	irte.fields.int_type    = apic->irq_delivery_mode;
	irte.fields.destination = destination;
	irte.fields.dm          = apic->irq_dest_mode;
	irte.fields.valid       = 1;

	ret = modify_irte(devid, index, irte);
	if (ret)
		return ret;

	/* Setup IOAPIC entry */
	memset(entry, 0, sizeof(*entry));

	/*
	 * With remapping active the RTE's vector field carries the IRTE
	 * index rather than the CPU vector (the IOMMU translates it).
	 */
	entry->vector        = index;
	entry->mask          = 0;
	entry->trigger       = attr->trigger;
	entry->polarity      = attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}
|
|
|
+
|
|
|
/*
 * set_affinity - irq_chip affinity callback for remapped interrupts
 * @data:  irq descriptor data (chip_data holds the irq_cfg)
 * @mask:  requested CPU affinity mask
 * @force: unused here (standard irq_set_affinity signature)
 *
 * Re-routes a remapped interrupt by assigning a vector for the new mask
 * and rewriting only the vector/destination fields of the existing IRTE
 * (read back first via get_irte() so the other fields are preserved).
 *
 * Returns 0 on success, -1 on !SMP, -EINVAL if the mask contains no
 * online CPU, -EBUSY if the IRTE cannot be read or a vector cannot be
 * assigned, or the error from cpu_mask_to_apicid_and().
 */
static int set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_2_iommu *irte_info;
	unsigned int dest, irq;
	struct irq_cfg *cfg;
	union irte irte;
	int err;

	/* Affinity is meaningless on UP kernels. */
	if (!config_enabled(CONFIG_SMP))
		return -1;

	cfg       = data->chip_data;
	irq       = data->irq;
	irte_info = &cfg->irq_2_iommu;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	/* Read the current IRTE so untouched fields survive the rewrite. */
	if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
		return -EBUSY;

	if (assign_irq_vector(irq, cfg, mask))
		return -EBUSY;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		/* Roll back to the previous affinity's vector on failure. */
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
		return err;
	}

	/* Only vector and destination change; the rest was read back above. */
	irte.fields.vector      = cfg->vector;
	irte.fields.destination = dest;

	modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);

	/* Clean up the old vector on the previous CPU if a move is pending. */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);

	return 0;
}
|
|
|
+
|
|
|
/*
 * free_irq - release the interrupt remapping resources of an IRQ
 * @irq: Linux interrupt number to tear down
 *
 * Looks up the (devid, index) pair recorded in the irq's irq_2_iommu
 * bookkeeping and frees the corresponding IRTE via free_irte().
 *
 * Returns 0 on success or -EINVAL if the irq has no chip data.
 */
static int free_irq(int irq)
{
	struct irq_2_iommu *irte_info;
	struct irq_cfg *cfg;

	cfg = irq_get_chip_data(irq);
	if (!cfg)
		return -EINVAL;

	irte_info = &cfg->irq_2_iommu;

	/* sub_handle/irte_index were filled in by setup time (devid, index). */
	free_irte(irte_info->sub_handle, irte_info->irte_index);

	return 0;
}
|
|
|
+
|
|
|
#endif
|