@@ -62,6 +62,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 	int i, r = -1;
 	struct kvm_vcpu *vcpu, *lowest = NULL;
 
+	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
 	if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
 			kvm_is_dm_lowest_prio(irq))
 		printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
@@ -113,7 +115,7 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
 }
 
-/* This should be called with the kvm->lock mutex held
+/* This should be called with the kvm->irq_lock mutex held
  * Return value:
  * < 0   Interrupt was ignored (masked or not delivered for other reasons)
  * = 0   Interrupt was coalesced (previous irq is still pending)
@@ -125,6 +127,8 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level)
 	unsigned long *irq_state, sig_level;
 	int ret = -1;
 
+	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
 	if (irq < KVM_IOAPIC_NUM_PINS) {
 		irq_state = (unsigned long *)&kvm->arch.irq_states[irq];
 
@@ -175,19 +179,26 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian)
 {
+	mutex_lock(&kvm->irq_lock);
 	hlist_add_head(&kian->link, &kvm->arch.irq_ack_notifier_list);
+	mutex_unlock(&kvm->irq_lock);
 }
 
-void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian)
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+				     struct kvm_irq_ack_notifier *kian)
 {
+	mutex_lock(&kvm->irq_lock);
 	hlist_del_init(&kian->link);
+	mutex_unlock(&kvm->irq_lock);
 }
 
-/* The caller must hold kvm->lock mutex */
 int kvm_request_irq_source_id(struct kvm *kvm)
 {
 	unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
-	int irq_source_id = find_first_zero_bit(bitmap,
+	int irq_source_id;
+
+	mutex_lock(&kvm->irq_lock);
+	irq_source_id = find_first_zero_bit(bitmap,
 		sizeof(kvm->arch.irq_sources_bitmap));
 
 	if (irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
@@ -197,6 +208,7 @@ int kvm_request_irq_source_id(struct kvm *kvm)
 
 	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
 	set_bit(irq_source_id, bitmap);
+	mutex_unlock(&kvm->irq_lock);
 
 	return irq_source_id;
 }
@@ -207,6 +219,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 
 	ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
 
+	mutex_lock(&kvm->irq_lock);
 	if (irq_source_id < 0 ||
 	    irq_source_id >= sizeof(kvm->arch.irq_sources_bitmap)) {
 		printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
@@ -215,19 +228,24 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
 	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
 		clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
 	clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
 				    struct kvm_irq_mask_notifier *kimn)
 {
+	mutex_lock(&kvm->irq_lock);
 	kimn->irq = irq;
 	hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 				      struct kvm_irq_mask_notifier *kimn)
 {
+	mutex_lock(&kvm->irq_lock);
 	hlist_del(&kimn->link);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
@@ -235,6 +253,8 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
 	struct kvm_irq_mask_notifier *kimn;
 	struct hlist_node *n;
 
+	WARN_ON(!mutex_is_locked(&kvm->irq_lock));
+
 	hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
 		if (kimn->irq == irq)
 			kimn->func(kimn, mask);
@@ -250,7 +270,9 @@ static void __kvm_free_irq_routing(struct list_head *irq_routing)
 
 void kvm_free_irq_routing(struct kvm *kvm)
 {
+	mutex_lock(&kvm->irq_lock);
 	__kvm_free_irq_routing(&kvm->irq_routing);
+	mutex_unlock(&kvm->irq_lock);
 }
 
 static int setup_routing_entry(struct kvm_kernel_irq_routing_entry *e,
@@ -325,13 +347,13 @@ int kvm_set_irq_routing(struct kvm *kvm,
 		e = NULL;
 	}
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->irq_lock);
 	list_splice(&kvm->irq_routing, &tmp);
 	INIT_LIST_HEAD(&kvm->irq_routing);
 	list_splice(&irq_list, &kvm->irq_routing);
 	INIT_LIST_HEAD(&irq_list);
 	list_splice(&tmp, &irq_list);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->irq_lock);
 
 	r = 0;
 
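
A note on the locking discipline the patch applies (not part of the diff itself): exported entry points such as kvm_register_irq_ack_notifier() take kvm->irq_lock themselves, while functions that run inside an already-locked path, such as kvm_set_irq() and kvm_fire_mask_notifiers(), only assert the lock with WARN_ON(!mutex_is_locked(...)). A minimal self-contained C sketch of the same pattern; the demo_* names are illustrative only:

	#include <linux/mutex.h>
	#include <linux/bug.h>

	/* Toy state guarded by its own mutex, standing in for the irq
	 * routing/notifier lists guarded by kvm->irq_lock above. */
	struct demo_state {
		struct mutex lock;
		int value;
	};

	/* Inner helper: the caller must already hold demo->lock, so a
	 * missed lock shows up as a loud warning, not a silent race. */
	static void __demo_update(struct demo_state *demo, int value)
	{
		WARN_ON(!mutex_is_locked(&demo->lock));
		demo->value = value;
	}

	/* Entry point: takes and releases the lock around the helper,
	 * mirroring kvm_register_irq_mask_notifier() in the patch. */
	static void demo_update(struct demo_state *demo, int value)
	{
		mutex_lock(&demo->lock);
		__demo_update(demo, value);
		mutex_unlock(&demo->lock);
	}

One caveat: mutex_is_locked() only reports that *some* task holds the mutex, not that the current task does, so the WARN_ON is a debugging aid rather than a proof of ownership.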