@@ -2,6 +2,7 @@
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 #include <linux/pci.h>
+#include <linux/irq.h>
 #include <asm/io_apic.h>
 #include "intel-iommu.h"
 #include "intr_remapping.h"
@@ -10,6 +11,248 @@ static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static int ir_ioapic_num;
 int intr_remapping_enabled;
 
+static struct {
+	struct intel_iommu *iommu;
+	u16 irte_index;
+	u16 sub_handle;
+	u8 irte_mask;
+} irq_2_iommu[NR_IRQS];
+
+static DEFINE_SPINLOCK(irq_2_ir_lock);
+
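+/* Return 1 if @irq has an interrupt remap table entry assigned to it. */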
+int irq_remapped(int irq)
+{
+	if (irq >= NR_IRQS)
+		return 0;
+
+	if (!irq_2_iommu[irq].iommu)
+		return 0;
+
+	return 1;
+}
+
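+/* Copy the IRTE currently programmed for @irq into @entry. */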
+int get_irte(int irq, struct irte *entry)
+{
+	int index;
+
+	if (!entry || irq >= NR_IRQS)
+		return -1;
+
+	spin_lock(&irq_2_ir_lock);
+	if (!irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	*entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);
+
+	spin_unlock(&irq_2_ir_lock);
+	return 0;
+}
+
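+/*
+ * Allocate @count consecutive IRTEs (rounded up to a power of two) in
+ * @iommu's remap table and bind the first one to @irq.  Returns the base
+ * index on success, -1 on failure.
+ */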
+int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+{
+	struct ir_table *table = iommu->ir_table;
+	u16 index, start_index;
+	unsigned int mask = 0;
+	int i;
+
+	if (!count)
+		return -1;
+
+	/*
+	 * start the IRTE search from index 0.
+	 */
+	index = start_index = 0;
+
+	if (count > 1) {
+		count = __roundup_pow_of_two(count);
+		mask = ilog2(count);
+	}
+
+	if (mask > ecap_max_handle_mask(iommu->ecap)) {
+		printk(KERN_ERR
+		       "Requested mask %x exceeds the max invalidation handle"
+		       " mask value %Lx\n", mask,
+		       ecap_max_handle_mask(iommu->ecap));
+		return -1;
+	}
+
+	spin_lock(&irq_2_ir_lock);
+	do {
+		for (i = index; i < index + count; i++)
+			if (table->base[i].present)
+				break;
+		/* empty index found */
+		if (i == index + count)
+			break;
+
+		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
+
+		if (index == start_index) {
+			spin_unlock(&irq_2_ir_lock);
+			printk(KERN_ERR "can't allocate an IRTE\n");
+			return -1;
+		}
+	} while (1);
+
+	for (i = index; i < index + count; i++)
+		table->base[i].present = 1;
+
+	irq_2_iommu[irq].iommu = iommu;
+	irq_2_iommu[irq].irte_index = index;
+	irq_2_iommu[irq].sub_handle = 0;
+	irq_2_iommu[irq].irte_mask = mask;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return index;
+}
+
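+/* Queue an interrupt entry cache invalidation for @index/@mask and wait for it. */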
+static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+{
+	struct qi_desc desc;
+
+	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
+		   | QI_IEC_SELECTIVE;
+	desc.high = 0;
+
+	qi_submit_sync(&desc, iommu);
+}
+
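+/* Return the IRTE base index for @irq and store its subhandle in @sub_handle. */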
+int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+{
+	int index;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	*sub_handle = irq_2_iommu[irq].sub_handle;
+	index = irq_2_iommu[irq].irte_index;
+	spin_unlock(&irq_2_ir_lock);
+	return index;
+}
+
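+/* Bind @irq to an already allocated IRTE, identified by @index and @subhandle. */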
+int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
+{
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	irq_2_iommu[irq].iommu = iommu;
+	irq_2_iommu[irq].irte_index = index;
+	irq_2_iommu[irq].sub_handle = subhandle;
+	irq_2_iommu[irq].irte_mask = 0;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
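+/* Drop @irq's association with its IRTE without touching the table entry itself. */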
+int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
+{
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	irq_2_iommu[irq].iommu = NULL;
+	irq_2_iommu[irq].irte_index = 0;
+	irq_2_iommu[irq].sub_handle = 0;
+	irq_2_iommu[irq].irte_mask = 0;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
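+/*
+ * Update the hardware IRTE for @irq from @irte_modified and flush the
+ * stale entry from the interrupt entry cache.
+ */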
+int modify_irte(int irq, struct irte *irte_modified)
+{
+	int index;
+	struct irte *irte;
+	struct intel_iommu *iommu;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	iommu = irq_2_iommu[irq].iommu;
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	irte = &iommu->ir_table->base[index];
+
+	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
+	__iommu_flush_cache(iommu, irte, sizeof(*irte));
+
+	qi_flush_iec(iommu, index, 0);
+
+	spin_unlock(&irq_2_ir_lock);
+	return 0;
+}
+
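+/* Invalidate the interrupt entry cache entries that cover @irq's IRTEs. */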
+int flush_irte(int irq)
+{
+	int index;
+	struct intel_iommu *iommu;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	iommu = irq_2_iommu[irq].iommu;
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+
+	qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
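+/*
+ * Free the IRTEs allocated for @irq.  Only the owner of the base handle
+ * (sub_handle == 0) clears the table entries and flushes the IEC.
+ */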
+int free_irte(int irq)
+{
+	int index, i;
+	struct irte *irte;
+	struct intel_iommu *iommu;
+
+	spin_lock(&irq_2_ir_lock);
+	if (irq >= NR_IRQS || !irq_2_iommu[irq].iommu) {
+		spin_unlock(&irq_2_ir_lock);
+		return -1;
+	}
+
+	iommu = irq_2_iommu[irq].iommu;
+
+	index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+	irte = &iommu->ir_table->base[index];
+
+	if (!irq_2_iommu[irq].sub_handle) {
+		for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
+			set_64bit((unsigned long *)(irte + i), 0);
+		qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+	}
+
+	irq_2_iommu[irq].iommu = NULL;
+	irq_2_iommu[irq].irte_index = 0;
+	irq_2_iommu[irq].sub_handle = 0;
+	irq_2_iommu[irq].irte_mask = 0;
+
+	spin_unlock(&irq_2_ir_lock);
+
+	return 0;
+}
+
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
 {
 	u64 addr;