@@ -57,6 +57,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
					  struct kvmppc_pte *pte, bool data);
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+					     u64 *vsid);
 
 static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
 {
@@ -66,13 +68,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t e
 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
 {
-	struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr);
+	u64 vsid;
 	struct kvmppc_pte pte;
 
 	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
		return pte.vpage;
 
-	return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16);
+	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+	return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
 }
 
 static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
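
Review note: kvmppc_mmu_book3s_32_ea_to_vp() now goes through the shared
esid_to_vsid path (which also picks up the MSR_PR tagging added at the end
of this patch) instead of duplicating the segment-register read. The vpage
layout itself is unchanged: the low 16 bits are the page index within the
256 MB segment, and the VSID sits above them. A minimal self-contained
sketch of that layout (plain C, helper name invented for illustration):

    #include <stdint.h>

    /* vpage = 16-bit page index within the segment, VSID above it;
     * mirrors the return expression in ea_to_vp() above. */
    static uint64_t make_vpage(uint32_t eaddr, uint64_t vsid)
    {
        return (((uint64_t)eaddr >> 12) & 0xffff) | (vsid << 16);
    }

Since segments are 256 MB, SID_SHIFT here is 28, so eaddr >> SID_SHIFT
yields the 4-bit ESID selecting one of the 16 segment registers.
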
@@ -142,8 +145,13 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
				    bat->bepi_mask);
		}
		if ((eaddr & bat->bepi_mask) == bat->bepi) {
+			u64 vsid;
+			kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
+				eaddr >> SID_SHIFT, &vsid);
+			vsid <<= 16;
+			pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
+
			pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
-			pte->vpage = (eaddr >> 12) | VSID_BAT;
			pte->may_read = bat->pp;
			pte->may_write = bat->pp > 1;
			pte->may_execute = true;
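
Review note: BAT hits used to stamp the magic VSID_BAT marker into
pte->vpage; they now carry a real vpage derived from the segment's VSID, so
BAT-backed pages live in the same vpage namespace as page-table
translations. The match test itself is unchanged; a standalone sketch of it
(names invented):

    #include <stdint.h>

    /* A BAT matches when the effective address agrees with BEPI on
     * every bit covered by the BEPI mask, as in the "if" above. */
    static int bat_hit(uint32_t eaddr, uint32_t bepi, uint32_t bepi_mask)
    {
        return (eaddr & bepi_mask) == bepi;
    }
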
@@ -302,6 +310,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
	/* And then put in the new SR */
	sre->raw = value;
	sre->vsid = (value & 0x0fffffff);
+	sre->valid = (value & 0x80000000) ? false : true;
	sre->Ks = (value & 0x40000000) ? true : false;
	sre->Kp = (value & 0x20000000) ? true : false;
	sre->nx = (value & 0x10000000) ? true : false;
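
Review note: the new "valid" flag comes from the T bit (0x80000000) of the
segment register value: T=1 marks a direct-store segment, which this MMU
cannot translate, so such an SR is recorded as invalid and esid_to_vsid()
below can fail cleanly instead of handing back a stale VSID. A
self-contained sketch of the decode (struct invented, same bit picks as the
code above):

    #include <stdbool.h>
    #include <stdint.h>

    struct sr_fields {
        bool valid, Ks, Kp, nx;
        uint32_t vsid;
    };

    /* Same bit assignments as the mtsrin code above: T, Ks, Kp, N,
     * then the VSID taken from the low 28 bits. */
    static struct sr_fields decode_sr(uint32_t value)
    {
        return (struct sr_fields) {
            .valid = !(value & 0x80000000),  /* T=1: direct-store */
            .Ks    = !!(value & 0x40000000), /* supervisor key */
            .Kp    = !!(value & 0x20000000), /* user key */
            .nx    = !!(value & 0x10000000), /* no-execute */
            .vsid  = value & 0x0fffffff,
        };
    }
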
@@ -312,7 +321,7 @@
 
 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
 {
-	kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL);
+	kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
 }
 
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
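
Review note: kvmppc_mmu_pte_flush() drops every cached PTE whose effective
address matches the given one on all mask bits. Narrowing the mask from
~0xFFF to 0x0FFFF000 keys the flush on the 16-bit page index within the
segment only, presumably because a 32-bit tlbie identifies the page by that
index rather than by the full effective address, so matching mappings in
every segment must go. A sketch of the assumed matching semantics (names
invented):

    #include <stdint.h>

    /* An entry is flushed when it agrees with the tlbie address on
     * every bit of ea_mask; 0x0FFFF000 covers eaddr bits 12-27. */
    static int flush_match(uint32_t entry_ea, uint32_t ea, uint32_t ea_mask)
    {
        return (entry_ea & ea_mask) == (ea & ea_mask);
    }
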
@@ -333,15 +342,22 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
		break;
	case MSR_DR|MSR_IR:
	{
-		ulong ea;
-		ea = esid << SID_SHIFT;
-		*vsid = find_sr(to_book3s(vcpu), ea)->vsid;
+		ulong ea = esid << SID_SHIFT;
+		struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
+
+		if (!sr->valid)
+			return -1;
+
+		*vsid = sr->vsid;
		break;
	}
	default:
		BUG();
	}
 
+	if (vcpu->arch.msr & MSR_PR)
+		*vsid |= VSID_PR;
+
	return 0;
 }
 
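
Review note: esid_to_vsid() is now fallible, and VSID_PR keeps problem-state
(MSR_PR) translations in a distinct VSID space from supervisor ones, so the
same guest EA maps to different shadow pages depending on privilege. Callers
need to handle the -1 return rather than assuming a VSID always comes back;
a hypothetical caller shape, for illustration only:

    #include <stdint.h>

    /* Invented stand-in with the same contract as
     * kvmppc_mmu_book3s_32_esid_to_vsid(): -1 when the SR is invalid. */
    extern int esid_to_vsid(uint64_t esid, uint64_t *vsid);

    static int translate(uint32_t eaddr, uint64_t *vpage)
    {
        uint64_t vsid;

        if (esid_to_vsid(eaddr >> 28, &vsid) < 0)
            return -1; /* invalid or direct-store segment */

        *vpage = (((uint64_t)eaddr >> 12) & 0xffff) | (vsid << 16);
        return 0;
    }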