@@ -29,6 +29,7 @@
 #include <linux/bootmem.h>
 #include <linux/slab.h>
 
+#include <asm/desc.h>
 #include <asm/ptrace.h>
 #include <asm/irq.h>
 #include <asm/idle.h>
@@ -36,10 +37,14 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 
+#include <xen/xen.h>
+#include <xen/hvm.h>
 #include <xen/xen-ops.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/event_channel.h>
+#include <xen/interface/hvm/hvm_op.h>
+#include <xen/interface/hvm/params.h>
 
 /*
  * This lock protects updates to the following mapping and reference-count
@@ -617,17 +622,13 @@ static DEFINE_PER_CPU(unsigned, xed_nesting_count);
  * a bitset of words which contain pending event bits.  The second
  * level is a bitset of pending events themselves.
  */
-void xen_evtchn_do_upcall(struct pt_regs *regs)
+static void __xen_evtchn_do_upcall(void)
 {
 	int cpu = get_cpu();
-	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
 	unsigned count;
 
-	exit_idle();
-	irq_enter();
-
 	do {
 		unsigned long pending_words;
 
@@ -667,10 +668,26 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	} while(count != 1);
 
 out:
+
+	put_cpu();
+}
+
+void xen_evtchn_do_upcall(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	exit_idle();
+	irq_enter();
+
+	__xen_evtchn_do_upcall();
+
 	irq_exit();
 	set_irq_regs(old_regs);
+}
 
-	put_cpu();
+void xen_hvm_evtchn_do_upcall(void)
+{
+	__xen_evtchn_do_upcall();
 }
 
 /* Rebind a new event channel to an existing irq. */
@@ -933,6 +950,40 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.retrigger	= retrigger_dynirq,
 };
 
+int xen_set_callback_via(uint64_t via)
+{
+	struct xen_hvm_param a;
+	a.domid = DOMID_SELF;
+	a.index = HVM_PARAM_CALLBACK_IRQ;
+	a.value = via;
+	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
+/* Vector callbacks are better than PCI interrupts to receive event
+ * channel notifications because we can receive vector callbacks on any
+ * vcpu and we don't need PCI support or APIC interactions. */
+void xen_callback_vector(void)
+{
+	int rc;
+	uint64_t callback_via;
+	if (xen_have_vector_callback) {
+		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
+		rc = xen_set_callback_via(callback_via);
+		if (rc) {
+			printk(KERN_ERR "Request for Xen HVM callback vector"
+					" failed.\n");
+			xen_have_vector_callback = 0;
+			return;
+		}
+		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
+				"enabled\n");
+		/* in the restore case the vector has already been allocated */
+		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
+			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
+	}
+}
+
 void __init xen_init_IRQ(void)
 {
 	int i;
@@ -947,5 +998,10 @@ void __init xen_init_IRQ(void)
 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
 		mask_evtchn(i);
 
-	irq_ctx_init(smp_processor_id());
+	if (xen_hvm_domain()) {
+		xen_callback_vector();
+		native_init_IRQ();
+	} else {
+		irq_ctx_init(smp_processor_id());
+	}
 }