@@ -30,7 +30,6 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
-#include <xen/interface/sched.h>
 #include <xen/features.h>
 #include <xen/page.h>
 #include <xen/hvc-console.h>
@@ -226,94 +225,6 @@ static unsigned long xen_get_debugreg(int reg)
 	return HYPERVISOR_get_debugreg(reg);
 }
 
-static unsigned long xen_save_fl(void)
-{
-	struct vcpu_info *vcpu;
-	unsigned long flags;
-
-	vcpu = x86_read_percpu(xen_vcpu);
-
-	/* flag has opposite sense of mask */
-	flags = !vcpu->evtchn_upcall_mask;
-
-	/* convert to IF type flag
-	   -0 -> 0x00000000
-	   -1 -> 0xffffffff
-	*/
-	return (-flags) & X86_EFLAGS_IF;
-}
-
-static void xen_restore_fl(unsigned long flags)
-{
-	struct vcpu_info *vcpu;
-
-	/* convert from IF type flag */
-	flags = !(flags & X86_EFLAGS_IF);
-
-	/* There's a one instruction preempt window here.  We need to
-	   make sure we're don't switch CPUs between getting the vcpu
-	   pointer and updating the mask. */
-	preempt_disable();
-	vcpu = x86_read_percpu(xen_vcpu);
-	vcpu->evtchn_upcall_mask = flags;
-	preempt_enable_no_resched();
-
-	/* Doesn't matter if we get preempted here, because any
-	   pending event will get dealt with anyway. */
-
-	if (flags == 0) {
-		preempt_check_resched();
-		barrier(); /* unmask then check (avoid races) */
-		if (unlikely(vcpu->evtchn_upcall_pending))
-			force_evtchn_callback();
-	}
-}
-
-static void xen_irq_disable(void)
-{
-	/* There's a one instruction preempt window here.  We need to
-	   make sure we're don't switch CPUs between getting the vcpu
-	   pointer and updating the mask. */
-	preempt_disable();
-	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
-	preempt_enable_no_resched();
-}
-
-static void xen_irq_enable(void)
-{
-	struct vcpu_info *vcpu;
-
-	/* We don't need to worry about being preempted here, since
-	   either a) interrupts are disabled, so no preemption, or b)
-	   the caller is confused and is trying to re-enable interrupts
-	   on an indeterminate processor. */
-
-	vcpu = x86_read_percpu(xen_vcpu);
-	vcpu->evtchn_upcall_mask = 0;
-
-	/* Doesn't matter if we get preempted here, because any
-	   pending event will get dealt with anyway. */
-
-	barrier(); /* unmask then check (avoid races) */
-	if (unlikely(vcpu->evtchn_upcall_pending))
-		force_evtchn_callback();
-}
-
-static void xen_safe_halt(void)
-{
-	/* Blocking includes an implicit local_irq_enable(). */
-	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
-		BUG();
-}
-
-static void xen_halt(void)
-{
-	if (irqs_disabled())
-		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-	else
-		xen_safe_halt();
-}
-
 static void xen_leave_lazy(void)
 {
 	paravirt_leave_lazy(paravirt_get_lazy_mode());
@@ -1308,36 +1219,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	},
 };
 
-static void __init __xen_init_IRQ(void)
-{
-#ifdef CONFIG_X86_64
-	int i;
-
-	/* Create identity vector->irq map */
-	for(i = 0; i < NR_VECTORS; i++) {
-		int cpu;
-
-		for_each_possible_cpu(cpu)
-			per_cpu(vector_irq, cpu)[i] = i;
-	}
-#endif /* CONFIG_X86_64 */
-
-	xen_init_IRQ();
-}
-
-static const struct pv_irq_ops xen_irq_ops __initdata = {
-	.init_IRQ = __xen_init_IRQ,
-	.save_fl = xen_save_fl,
-	.restore_fl = xen_restore_fl,
-	.irq_disable = xen_irq_disable,
-	.irq_enable = xen_irq_enable,
-	.safe_halt = xen_safe_halt,
-	.halt = xen_halt,
-#ifdef CONFIG_X86_64
-	.adjust_exception_frame = xen_adjust_exception_frame,
-#endif
-};
-
 static const struct pv_apic_ops xen_apic_ops __initdata = {
 #ifdef CONFIG_X86_LOCAL_APIC
 	.apic_write = xen_apic_write,
@@ -1740,10 +1621,11 @@ asmlinkage void __init xen_start_kernel(void)
 	pv_init_ops = xen_init_ops;
 	pv_time_ops = xen_time_ops;
 	pv_cpu_ops = xen_cpu_ops;
-	pv_irq_ops = xen_irq_ops;
 	pv_apic_ops = xen_apic_ops;
 	pv_mmu_ops = xen_mmu_ops;
 
+	xen_init_irq_ops();
+
 	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
 		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
 		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
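
Note that nothing above is deleted outright: the removed irq handlers and the pv_irq_ops table presumably move wholesale into a companion irq file, and the new xen_init_irq_ops() call registers them from there. A minimal sketch of that registration helper, assuming the xen_irq_ops table moves over unchanged:

/* Sketch of the companion file's registration helper (not shown in
 * these hunks); the handlers and the xen_irq_ops table removed above
 * are assumed to land there verbatim. */
void __init xen_init_irq_ops(void)
{
	/* Replaces the direct "pv_irq_ops = xen_irq_ops" assignment
	 * removed from xen_start_kernel() in the last hunk. */
	pv_irq_ops = xen_irq_ops;
}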