@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
 {
 	int emulated = EMULATE_DONE;
+	int rt = get_rt(inst);
+	int rs = get_rs(inst);
+	int ra = get_ra(inst);
+	int rb = get_rb(inst);
 
 	switch (get_op(inst)) {
 	case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case 31:
 		switch (get_xop(inst)) {
 		case OP_31_XOP_MFMSR:
-			kvmppc_set_gpr(vcpu, get_rt(inst),
-				       vcpu->arch.shared->msr);
+			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
 			break;
 		case OP_31_XOP_MTMSRD:
 		{
-			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
 			if (inst & 0x10000) {
-				vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
-				vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
+				ulong new_msr = vcpu->arch.shared->msr;
+				new_msr &= ~(MSR_RI | MSR_EE);
+				new_msr |= rs_val & (MSR_RI | MSR_EE);
+				vcpu->arch.shared->msr = new_msr;
 			} else
-				kvmppc_set_msr(vcpu, rs);
+				kvmppc_set_msr(vcpu, rs_val);
 			break;
 		}
 		case OP_31_XOP_MTMSR:
-			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
 			break;
 		case OP_31_XOP_MFSR:
 		{
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			if (vcpu->arch.mmu.mfsrin) {
 				u32 sr;
 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+				kvmppc_set_gpr(vcpu, rt, sr);
 			}
 			break;
 		}
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		{
 			int srnum;
 
-			srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
 			if (vcpu->arch.mmu.mfsrin) {
 				u32 sr;
 				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+				kvmppc_set_gpr(vcpu, rt, sr);
 			}
 			break;
 		}
 		case OP_31_XOP_MTSR:
 			vcpu->arch.mmu.mtsrin(vcpu,
 				(inst >> 16) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
+				kvmppc_get_gpr(vcpu, rs));
 			break;
 		case OP_31_XOP_MTSRIN:
 			vcpu->arch.mmu.mtsrin(vcpu,
-				(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
+				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
+				kvmppc_get_gpr(vcpu, rs));
 			break;
 		case OP_31_XOP_TLBIE:
 		case OP_31_XOP_TLBIEL:
 		{
 			bool large = (inst & 0x00200000) ? true : false;
-			ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+			ulong addr = kvmppc_get_gpr(vcpu, rb);
 			vcpu->arch.mmu.tlbie(vcpu, addr, large);
 			break;
 		}
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 				return EMULATE_FAIL;
 
 			vcpu->arch.mmu.slbmte(vcpu,
-					kvmppc_get_gpr(vcpu, get_rs(inst)),
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
+					kvmppc_get_gpr(vcpu, rs),
+					kvmppc_get_gpr(vcpu, rb));
 			break;
 		case OP_31_XOP_SLBIE:
 			if (!vcpu->arch.mmu.slbie)
 				return EMULATE_FAIL;
 
 			vcpu->arch.mmu.slbie(vcpu,
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
+					kvmppc_get_gpr(vcpu, rb));
 			break;
 		case OP_31_XOP_SLBIA:
 			if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			if (!vcpu->arch.mmu.slbmfee) {
 				emulated = EMULATE_FAIL;
 			} else {
-				ulong t, rb;
+				ulong t, rb_val;
 
-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+				rb_val = kvmppc_get_gpr(vcpu, rb);
+				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
+				kvmppc_set_gpr(vcpu, rt, t);
 			}
 			break;
 		case OP_31_XOP_SLBMFEV:
 			if (!vcpu->arch.mmu.slbmfev) {
 				emulated = EMULATE_FAIL;
 			} else {
-				ulong t, rb;
+				ulong t, rb_val;
 
-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+				rb_val = kvmppc_get_gpr(vcpu, rb);
+				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
+				kvmppc_set_gpr(vcpu, rt, t);
 			}
 			break;
 		case OP_31_XOP_DCBA:
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		case OP_31_XOP_DCBZ:
 		{
-			ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-			ulong ra = 0;
+			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
+			ulong ra_val = 0;
 			ulong addr, vaddr;
 			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
 			u32 dsisr;
 			int r;
 
-			if (get_ra(inst))
-				ra = kvmppc_get_gpr(vcpu, get_ra(inst));
+			if (ra)
+				ra_val = kvmppc_get_gpr(vcpu, ra);
 
-			addr = (ra + rb) & ~31ULL;
+			addr = (ra_val + rb_val) & ~31ULL;
 			if (!(vcpu->arch.shared->msr & MSR_SF))
 				addr &= 0xffffffff;
 			vaddr = addr;
@@ -565,23 +570,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
 ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
 {
 	ulong dar = 0;
-	ulong ra;
+	ulong ra = get_ra(inst);
+	ulong rb = get_rb(inst);
 
 	switch (get_op(inst)) {
 	case OP_LFS:
 	case OP_LFD:
 	case OP_STFD:
 	case OP_STFS:
-		ra = get_ra(inst);
 		if (ra)
 			dar = kvmppc_get_gpr(vcpu, ra);
 		dar += (s32)((s16)inst);
 		break;
 	case 31:
-		ra = get_ra(inst);
 		if (ra)
 			dar = kvmppc_get_gpr(vcpu, ra);
-		dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+		dar += kvmppc_get_gpr(vcpu, rb);
 		break;
 	default:
 		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
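
For reference, a minimal sketch of what the instruction-field accessors cached at the top of kvmppc_core_emulate_op() are assumed to do; the kernel's real get_*() helpers are defined elsewhere in the tree, so this is illustrative only, following the standard PowerPC instruction layout in which RT/RS occupy bits 6-10, RA bits 11-15 and RB bits 16-20 (IBM bit numbering, MSB = bit 0):

	/* Illustrative only: assumed equivalents of the kernel's field accessors. */
	static inline int get_rt(unsigned int inst) { return (inst >> 21) & 0x1f; } /* RT, bits 6-10 */
	static inline int get_rs(unsigned int inst) { return (inst >> 21) & 0x1f; } /* RS, same field position as RT */
	static inline int get_ra(unsigned int inst) { return (inst >> 16) & 0x1f; } /* RA, bits 11-15 */
	static inline int get_rb(unsigned int inst) { return (inst >> 11) & 0x1f; } /* RB, bits 16-20 */

Decoding these fields once into the rt/rs/ra/rb locals lets each case arm use the plain variables instead of re-extracting the field from the instruction word at every use, which is the point of the refactoring above.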