@@ -1,5 +1,5 @@
 /*
- * Fault injection for 64bit guests.
+ * Fault injection for both 32 and 64bit guests.
  *
  * Copyright (C) 2012,2013 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
@@ -29,6 +29,76 @@
 				 PSR_I_BIT | PSR_D_BIT)
 #define EL1_EXCEPT_SYNC_OFFSET	0x200
 
+static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+{
+	unsigned long cpsr;
+	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+	u32 return_offset = (is_thumb) ? 4 : 0;
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+
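+	/* Enter the target exception mode with IRQs masked */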
+	cpsr = mode | COMPAT_PSR_I_BIT;
+
+	if (sctlr & (1 << 30))		/* SCTLR.TE: exceptions taken in Thumb */
+		cpsr |= COMPAT_PSR_T_BIT;
+	if (sctlr & (1 << 25))		/* SCTLR.EE: big-endian exception entry */
+		cpsr |= COMPAT_PSR_E_BIT;
+
+	*vcpu_cpsr(vcpu) = cpsr;
+
+	/* Note: These now point to the banked copies */
+	*vcpu_spsr(vcpu) = new_spsr_value;
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+	/* Branch to exception vector */
+	if (sctlr & (1 << 13))		/* SCTLR.V: high vectors at 0xffff0000 */
+		vect_offset += 0xffff0000;
+	else /* always have security exceptions */
+		vect_offset += vcpu_cp15(vcpu, c12_VBAR);
+
+	*vcpu_pc(vcpu) = vect_offset;
+}
+
+static void inject_undef32(struct kvm_vcpu *vcpu)
+{
+	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);	/* 0x4: undef vector */
+}
+
+/*
+ * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
+ * pseudocode.
+ */
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
+			 unsigned long addr)
+{
+	u32 vect_offset;
+	u32 *far, *fsr;
+	bool is_lpae;
+
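+	/* A32 vector offsets: 0x0c = prefetch abort, 0x10 = data abort */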
+	if (is_pabt) {
+		vect_offset = 12;
+		far = &vcpu_cp15(vcpu, c6_IFAR);
+		fsr = &vcpu_cp15(vcpu, c5_IFSR);
+	} else { /* !is_pabt */
+		vect_offset = 16;
+		far = &vcpu_cp15(vcpu, c6_DFAR);
+		fsr = &vcpu_cp15(vcpu, c5_DFSR);
+	}
+
+	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+
+	*far = addr;
+
+	/* Give the guest an IMPLEMENTATION DEFINED exception */
+	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);	/* TTBCR.EAE: LPAE format */
+	if (is_lpae)
+		*fsr = 1 << 9 | 0x34;
+	else
+		*fsr = 0x14;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -98,6 +168,9 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
-	inject_abt64(vcpu, false, addr);
+	if (!(vcpu->arch.hcr_el2 & HCR_RW))	/* HCR_EL2.RW = 0: 32bit guest */
+		inject_abt32(vcpu, false, addr);
+	else
+		inject_abt64(vcpu, false, addr);
 }
 
@@ -111,6 +184,9 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
  */
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
-	inject_abt64(vcpu, true, addr);
+	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+		inject_abt32(vcpu, true, addr);
+	else
+		inject_abt64(vcpu, true, addr);
 }
 
@@ -122,5 +198,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-	inject_undef64(vcpu);
+	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+		inject_undef32(vcpu);
+	else
+		inject_undef64(vcpu);
 }