@@ -4472,12 +4472,19 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
 {
-       int i, r;
+       unsigned long rflags;
+       int old_debug;
+       int i;
 
        vcpu_load(vcpu);
 
-       if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
-           (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
+       old_debug = vcpu->guest_debug;
+
+       vcpu->guest_debug = dbg->control;
+       if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
+               vcpu->guest_debug = 0;
+
+       if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
                for (i = 0; i < KVM_NR_DB_REGS; ++i)
                        vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
                vcpu->arch.switch_db_regs =
@@ -4488,16 +4495,23 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
        }
 
-       r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
+       rflags = kvm_x86_ops->get_rflags(vcpu);
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+               rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+       else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
+               rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+       kvm_x86_ops->set_rflags(vcpu, rflags);
 
-       if (dbg->control & KVM_GUESTDBG_INJECT_DB)
+       kvm_x86_ops->set_guest_debug(vcpu, dbg);
+
+       if (vcpu->guest_debug & KVM_GUESTDBG_INJECT_DB)
                kvm_queue_exception(vcpu, DB_VECTOR);
-       else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
+       else if (vcpu->guest_debug & KVM_GUESTDBG_INJECT_BP)
                kvm_queue_exception(vcpu, BP_VECTOR);
 
        vcpu_put(vcpu);
 
-       return r;
+       return 0;
 }
 
 /*