@@ -31,6 +31,7 @@ struct hpet_scope {
 };
 
 #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
+#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
 
 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
@@ -814,6 +815,93 @@ error:
 	return -1;
 }
 
+static void prepare_irte(struct irte *irte, int vector,
+			 unsigned int dest)
+{
+	memset(irte, 0, sizeof(*irte));
+
+	irte->present = 1;
+	irte->dst_mode = apic->irq_dest_mode;
+	/*
+	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
+	 * actual level or edge trigger will be setup in the IO-APIC
+	 * RTE. This will help simplify level triggered irq migration.
+	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
+	 * irq migration in the presence of interrupt-remapping.
+	 */
+	irte->trigger_mode = 0;
+	irte->dlvry_mode = apic->irq_delivery_mode;
+	irte->vector = vector;
+	irte->dest_id = IRTE_DEST(dest);
+	irte->redir_hint = 1;
+}
+
+static int intel_setup_ioapic_entry(int irq,
+				    struct IO_APIC_route_entry *route_entry,
+				    unsigned int destination, int vector,
+				    struct io_apic_irq_attr *attr)
+{
+	int ioapic_id = mpc_ioapic_id(attr->ioapic);
+	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
+	struct IR_IO_APIC_route_entry *entry;
+	struct irte irte;
+	int index;
+
+	if (!iommu) {
+		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
+		return -ENODEV;
+	}
+
+	entry = (struct IR_IO_APIC_route_entry *)route_entry;
+
+	index = alloc_irte(iommu, irq, 1);
+	if (index < 0) {
+		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
+		return -ENOMEM;
+	}
+
+	prepare_irte(&irte, vector, destination);
+
+	/* Set source-id of interrupt request */
+	set_ioapic_sid(&irte, ioapic_id);
+
+	modify_irte(irq, &irte);
+
+	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
+		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
+		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
+		"Avail:%X Vector:%02X Dest:%08X "
+		"SID:%04X SQ:%X SVT:%X)\n",
+		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
+		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
+		irte.avail, irte.vector, irte.dest_id,
+		irte.sid, irte.sq, irte.svt);
+
+	memset(entry, 0, sizeof(*entry));
+
+	entry->index2 = (index >> 15) & 0x1;
+	entry->zero = 0;
+	entry->format = 1;
+	entry->index = (index & 0x7fff);
+	/*
+	 * IO-APIC RTE will be configured with virtual vector.
+	 * irq handler will do the explicit EOI to the io-apic.
+	 */
+	entry->vector = attr->ioapic_pin;
+	entry->mask = 0;			/* enable IRQ */
+	entry->trigger = attr->trigger;
+	entry->polarity = attr->polarity;
+
+	/* Mask level triggered irqs.
+	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
+	 */
+	if (attr->trigger)
+		entry->mask = 1;
+
+	return 0;
+}
+
+
 struct irq_remap_ops intel_irq_remap_ops = {
 	.supported		= intel_intr_remapping_supported,
 	.hardware_init		= dmar_table_init,
@@ -821,4 +909,5 @@ struct irq_remap_ops intel_irq_remap_ops = {
 	.hardware_disable	= disable_intr_remapping,
 	.hardware_reenable	= reenable_intr_remapping,
 	.enable_faulting	= enable_drhd_fault_handling,
+	.setup_ioapic_entry	= intel_setup_ioapic_entry,
 };
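
A note on the new IRTE_DEST() macro above: the destination programmed into the IRTE depends on the APIC mode. In legacy xAPIC mode the 8-bit APIC ID is expected in bits 15:8 of the IRTE destination field, while in x2APIC mode the full 32-bit ID is used as-is, which is why the macro shifts left by 8 only when x2apic_mode is clear. The stand-alone sketch below is not part of the patch; x2apic_mode is mocked as a local flag purely to demonstrate the two encodings:

	#include <stdio.h>

	static int x2apic_mode;	/* stand-in for the kernel's global flag */

	#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)

	int main(void)
	{
		unsigned int apicid = 0x05;

		x2apic_mode = 0;	/* xAPIC: ID lands in dest_id bits 15:8 */
		printf("xAPIC  dest_id = 0x%08x\n", IRTE_DEST(apicid));	/* 0x00000500 */

		x2apic_mode = 1;	/* x2APIC: 32-bit ID used directly */
		printf("x2APIC dest_id = 0x%08x\n", IRTE_DEST(apicid));	/* 0x00000005 */

		return 0;
	}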