@@ -133,13 +133,6 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.exit_stop_request++;
 	spin_lock_bh(&vcpu->arch.local_int.lock);
-	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
-		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
-		rc = kvm_s390_vcpu_store_status(vcpu,
-						KVM_S390_STORE_STATUS_NOADDR);
-		if (rc >= 0)
-			rc = -EOPNOTSUPP;
-	}
 
 	if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
 		vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
@@ -155,7 +148,18 @@ static int handle_stop(struct kvm_vcpu *vcpu)
 		rc = -EOPNOTSUPP;
 	}
 
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
+		vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
+		/* store status must be called unlocked. Since local_int.lock
+		 * only protects local_int.* and not guest memory we can give
+		 * up the lock here */
+		spin_unlock_bh(&vcpu->arch.local_int.lock);
+		rc = kvm_s390_vcpu_store_status(vcpu,
+						KVM_S390_STORE_STATUS_NOADDR);
+		if (rc >= 0)
+			rc = -EOPNOTSUPP;
+	} else
+		spin_unlock_bh(&vcpu->arch.local_int.lock);
 	return rc;
 }
 
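For context on why the store has to move: local_int.lock only serializes the local interrupt bookkeeping (local_int.*), while kvm_s390_vcpu_store_status writes guest memory and must not run under the bh-disabled spinlock. The pattern is: test and clear the action bit while holding the lock, drop the lock, then do the slow work. Below is a minimal userspace sketch of that pattern; demo_state, demo_slow_store, and ACTION_STORE are hypothetical names standing in for the KVM structures and are not part of the kernel.

/* demo.c - hypothetical analogue of the lock-dropping pattern above;
 * build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>

#define ACTION_STORE 0x01	/* stands in for ACTION_STORE_ON_STOP */

struct demo_state {
	pthread_mutex_t lock;	/* protects action_bits only, nothing else */
	unsigned int action_bits;
};

/* stands in for kvm_s390_vcpu_store_status(); must run unlocked */
static int demo_slow_store(void)
{
	puts("storing status");
	return 0;
}

static int demo_handle_stop(struct demo_state *s)
{
	int rc = 0;

	pthread_mutex_lock(&s->lock);
	if (s->action_bits & ACTION_STORE) {
		/* consume the flag while still holding the lock so a
		 * racing caller cannot trigger a second store */
		s->action_bits &= ~ACTION_STORE;
		/* the lock covers only action_bits, so it can be
		 * dropped before the slow call, as the patch does */
		pthread_mutex_unlock(&s->lock);
		rc = demo_slow_store();
	} else {
		pthread_mutex_unlock(&s->lock);
	}
	return rc;
}

int main(void)
{
	struct demo_state s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.action_bits = ACTION_STORE,
	};

	return demo_handle_stop(&s);
}

Note that clearing the bit before releasing the lock is what keeps the store single-shot: any concurrent path that takes the lock afterwards sees the flag already gone, which is the same ordering the patch preserves in handle_stop.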