@@ -102,6 +102,23 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 	return r;
 }
 
+static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
+				   struct kvm_lapic_irq *irq)
+{
+	trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
+
+	irq->dest_id = (e->msi.address_lo &
+			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+	irq->vector = (e->msi.data &
+			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+	irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
+	irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
+	irq->delivery_mode = e->msi.data & 0x700;
+	irq->level = 1;
+	irq->shorthand = 0;
+	/* TODO Deal with RH bit of MSI message address */
+}
+
 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 		struct kvm *kvm, int irq_source_id, int level)
 {
@@ -110,22 +127,26 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	if (!level)
 		return -1;
 
-	trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
+	kvm_set_msi_irq(e, &irq);
 
-	irq.dest_id = (e->msi.address_lo &
-			MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
-	irq.vector = (e->msi.data &
-			MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
-	irq.dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
-	irq.trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
-	irq.delivery_mode = e->msi.data & 0x700;
-	irq.level = 1;
-	irq.shorthand = 0;
-
-	/* TODO Deal with RH bit of MSI message address */
 	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
+
+static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
+				struct kvm *kvm)
+{
+	struct kvm_lapic_irq irq;
+	int r;
+
+	kvm_set_msi_irq(e, &irq);
+
+	if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r))
+		return r;
+	else
+		return -EWOULDBLOCK;
+}
+
 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
 {
 	struct kvm_kernel_irq_routing_entry route;
@@ -178,6 +199,44 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
 	return ret;
 }
 
+/*
+ * Deliver an IRQ in an atomic context if we can, or return a failure,
+ * user can retry in a process context.
+ * Return value:
+ *  -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
+ *  Other values - No need to retry.
+ */
+int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
+{
+	struct kvm_kernel_irq_routing_entry *e;
+	int ret = -EINVAL;
+	struct kvm_irq_routing_table *irq_rt;
+	struct hlist_node *n;
+
+	trace_kvm_set_irq(irq, level, irq_source_id);
+
+	/*
+	 * Injection into either PIC or IOAPIC might need to scan all CPUs,
+	 * which would need to be retried from thread context; when same GSI
+	 * is connected to both PIC and IOAPIC, we'd have to report a
+	 * partial failure here.
+	 * Since there's no easy way to do this, we only support injecting MSI
+	 * which is limited to 1:1 GSI mapping.
+	 */
+	rcu_read_lock();
+	irq_rt = rcu_dereference(kvm->irq_routing);
+	if (irq < irq_rt->nr_rt_entries)
+		hlist_for_each_entry(e, n, &irq_rt->map[irq], link) {
+			if (likely(e->type == KVM_IRQ_ROUTING_MSI))
+				ret = kvm_set_msi_inatomic(e, kvm);
+			else
+				ret = -EWOULDBLOCK;
+			break;
+		}
+	rcu_read_unlock();
+	return ret;
+}
+
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 {
 	struct kvm_irq_ack_notifier *kian;
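
For reference, a minimal caller sketch (hypothetical names, not part of the patch above) illustrating the documented contract of kvm_set_irq_inatomic(): attempt delivery from atomic context first, and only on -EWOULDBLOCK defer the injection to process context, for example via a work item that calls kvm_set_irq().

#include <linux/kvm_host.h>
#include <linux/workqueue.h>

/*
 * Hypothetical caller sketch, not part of the patch above.  It relies only on
 * the documented return convention: any value other than -EWOULDBLOCK means
 * the atomic fast path either delivered the MSI or definitively failed.
 */
struct msi_injector {
	struct kvm *kvm;
	int irq_source_id;
	u32 gsi;
	struct work_struct inject_work;		/* handler calls kvm_set_irq() */
};

/* May be called from atomic context (e.g. an interrupt handler). */
static void msi_injector_trigger(struct msi_injector *inj)
{
	if (kvm_set_irq_inatomic(inj->kvm, inj->irq_source_id,
				 inj->gsi, 1) == -EWOULDBLOCK)
		schedule_work(&inj->inject_work);	/* retry in process context */
}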