@@ -44,14 +44,19 @@
  */
 
 struct _irqfd {
-	struct kvm *kvm;
-	struct eventfd_ctx *eventfd;
-	int gsi;
-	struct list_head list;
-	poll_table pt;
-	wait_queue_t wait;
-	struct work_struct inject;
-	struct work_struct shutdown;
+	/* Used for MSI fast-path */
+	struct kvm *kvm;
+	wait_queue_t wait;
+	/* Update side is protected by irqfds.lock */
+	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
+	/* Used for level IRQ fast-path */
+	int gsi;
+	struct work_struct inject;
+	/* Used for setup/shutdown */
+	struct eventfd_ctx *eventfd;
+	struct list_head list;
+	poll_table pt;
+	struct work_struct shutdown;
 };
 
 static struct workqueue_struct *irqfd_cleanup_wq;
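
The new irq_entry field caches the kernel routing entry for this irqfd's GSI so the wakeup path can inject an MSI without bouncing through the workqueue. The sketch below is annotation, not part of the patch, and the function names are hypothetical; it restates the contract spelled out by the struct comments: readers use RCU and must tolerate a NULL entry, updaters publish under kvm->irqfds.lock.

/* Annotation sketch: read side runs from the wait-queue callback and must not sleep. */
static void irq_entry_reader_sketch(struct _irqfd *irqfd, struct kvm *kvm)
{
	struct kvm_kernel_irq_routing_entry *e;

	rcu_read_lock();
	e = rcu_dereference(irqfd->irq_entry);
	if (e)
		/* fast path: MSI delivery does not sleep */
		kvm_set_msi(e, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
	else
		/* no cached route: fall back to the workqueue */
		schedule_work(&irqfd->inject);
	rcu_read_unlock();
}

/* Annotation sketch: update side, caller holds kvm->irqfds.lock. */
static void irq_entry_updater_sketch(struct _irqfd *irqfd,
				     struct kvm_kernel_irq_routing_entry *e)
{
	rcu_assign_pointer(irqfd->irq_entry, e);	/* e may be NULL */
}
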
@@ -125,14 +130,22 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
 	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
 	unsigned long flags = (unsigned long)key;
+	struct kvm_kernel_irq_routing_entry *irq;
+	struct kvm *kvm = irqfd->kvm;
 
-	if (flags & POLLIN)
+	if (flags & POLLIN) {
+		rcu_read_lock();
+		irq = rcu_dereference(irqfd->irq_entry);
 		/* An event has been signaled, inject an interrupt */
-		schedule_work(&irqfd->inject);
+		if (irq)
+			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+		else
+			schedule_work(&irqfd->inject);
+		rcu_read_unlock();
+	}
 
 	if (flags & POLLHUP) {
 		/* The eventfd is closing, detach from KVM */
-		struct kvm *kvm = irqfd->kvm;
 		unsigned long flags;
 
 		spin_lock_irqsave(&kvm->irqfds.lock, flags);
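
For context, this is the path a userspace VMM exercises: it binds an eventfd to a GSI with the KVM_IRQFD ioctl, and each 8-byte write to that eventfd lands in irqfd_wakeup() above, where an MSI route is now injected directly instead of via schedule_work(). A minimal userspace sketch follows; it assumes vm_fd is an already-configured KVM VM descriptor with MSI routing installed for the GSI, and wire_irqfd/kick are hypothetical helper names.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Bind an eventfd to a GSI; returns the eventfd or -1 on error. */
static int wire_irqfd(int vm_fd, uint32_t gsi)
{
	struct kvm_irqfd req;
	int efd = eventfd(0, 0);

	if (efd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.fd  = efd;
	req.gsi = gsi;

	if (ioctl(vm_fd, KVM_IRQFD, &req) < 0)
		return -1;

	return efd;
}

/* Each counter increment requests one interrupt injection. */
static void kick(int efd)
{
	uint64_t one = 1;

	(void)write(efd, &one, sizeof(one));
}
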
@@ -163,9 +176,31 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
 	add_wait_queue(wqh, &irqfd->wait);
 }
 
+/* Must be called under irqfds.lock */
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+			 struct kvm_irq_routing_table *irq_rt)
+{
+	struct kvm_kernel_irq_routing_entry *e;
+	struct hlist_node *n;
+
+	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
+		rcu_assign_pointer(irqfd->irq_entry, NULL);
+		return;
+	}
+
+	hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
+		/* Only fast-path MSI. */
+		if (e->type == KVM_IRQ_ROUTING_MSI)
+			rcu_assign_pointer(irqfd->irq_entry, e);
+		else
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
+	}
+}
+
 static int
 kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 {
+	struct kvm_irq_routing_table *irq_rt;
 	struct _irqfd *irqfd, *tmp;
 	struct file *file = NULL;
 	struct eventfd_ctx *eventfd = NULL;
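
irqfd_update() decides, for each routing-table generation, whether this irqfd gets the fast path: only a GSI that resolves to MSI routing ends up with a non-NULL cached entry. Note that the loop above simply lets the last entry in the GSI's chain decide, which is fine for the common case of a single route. The sketch below expresses the intent rather than restating that loop exactly; pick_fast_path_sketch is a hypothetical name, not patch code.

/* Annotation sketch: return the entry to cache, or NULL to keep the slow path. */
static struct kvm_kernel_irq_routing_entry *
pick_fast_path_sketch(struct kvm_irq_routing_table *irq_rt, int gsi)
{
	struct kvm_kernel_irq_routing_entry *e, *msi = NULL;
	struct hlist_node *n;

	if (gsi >= irq_rt->nr_rt_entries)
		return NULL;			/* GSI not covered by this table */

	hlist_for_each_entry(e, n, &irq_rt->map[gsi], link) {
		if (e->type != KVM_IRQ_ROUTING_MSI)
			return NULL;		/* only MSI is safe to inject from the wakeup */
		msi = e;
	}
	return msi;
}
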
@@ -215,6 +250,10 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 		goto fail;
 	}
 
+	irq_rt = rcu_dereference_protected(kvm->irq_routing,
+					   lockdep_is_held(&kvm->irqfds.lock));
+	irqfd_update(kvm, irqfd, irq_rt);
+
 	events = file->f_op->poll(file, &irqfd->pt);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
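
The assign path already holds kvm->irqfds.lock here, so it does not need rcu_read_lock() to look at kvm->irq_routing; rcu_dereference_protected() documents exactly that, and lockdep_is_held() lets lockdep check the claim. A generic illustration of the idiom follows (annotation sketch with a hypothetical function name, not patch code).

/* Annotation sketch: update-side access to an RCU-protected pointer. */
static void routing_update_side_sketch(struct kvm *kvm, struct _irqfd *irqfd)
{
	struct kvm_irq_routing_table *irq_rt;

	spin_lock_irq(&kvm->irqfds.lock);
	/* The lockdep condition states why skipping rcu_read_lock() is safe. */
	irq_rt = rcu_dereference_protected(kvm->irq_routing,
					   lockdep_is_held(&kvm->irqfds.lock));
	irqfd_update(kvm, irqfd, irq_rt);
	spin_unlock_irq(&kvm->irqfds.lock);
}
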
@@ -271,8 +310,17 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
 	spin_lock_irq(&kvm->irqfds.lock);
 
 	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
+		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+			/*
+			 * This rcu_assign_pointer is needed for when
+			 * another thread calls kvm_irq_routing_update before
+			 * we flush workqueue below.
+			 * It is paired with synchronize_rcu done by caller
+			 * of that function.
+			 */
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
 			irqfd_deactivate(irqfd);
+		}
 	}
 
 	spin_unlock_irq(&kvm->irqfds.lock);
@@ -321,6 +369,25 @@ kvm_irqfd_release(struct kvm *kvm)
 
 }
 
+/*
+ * Change irq_routing and irqfd.
+ * Caller must invoke synchronize_rcu afterwards.
+ */
+void kvm_irq_routing_update(struct kvm *kvm,
+			    struct kvm_irq_routing_table *irq_rt)
+{
+	struct _irqfd *irqfd;
+
+	spin_lock_irq(&kvm->irqfds.lock);
+
+	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+
+	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+		irqfd_update(kvm, irqfd, irq_rt);
+
+	spin_unlock_irq(&kvm->irqfds.lock);
+}
+
 /*
  * create a host-wide workqueue for issuing deferred shutdown requests
  * aggregated from all vm* instances. We need our own isolated single-thread
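
kvm_irq_routing_update() only publishes the new table and refreshes every irqfd's cached entry; it deliberately leaves the grace period to its caller so a single synchronize_rcu() can cover both the table swap and all the irq_entry updates. A sketch of the expected caller pattern follows (annotation only; the real caller, presumably kvm_set_irq_routing in the irq routing code, is outside this file, and set_routing_sketch is a hypothetical name).

/* Annotation sketch: swap in the new table, then retire the old one. */
static int set_routing_sketch(struct kvm *kvm,
			      struct kvm_irq_routing_table *new)
{
	struct kvm_irq_routing_table *old;

	mutex_lock(&kvm->irq_lock);	/* serializes routing updates */
	old = kvm->irq_routing;
	kvm_irq_routing_update(kvm, new);
	mutex_unlock(&kvm->irq_lock);

	synchronize_rcu();		/* wait out all rcu_dereference() readers */
	kfree(old);			/* nothing can still see the old table */
	return 0;
}
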