@@ -2221,18 +2221,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
-			       unsigned access, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
 	kvm_release_pfn_clean(pfn);
 	if (is_hwpoison_pfn(pfn)) {
 		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
-	} else if (is_fault_pfn(pfn))
-		return -EFAULT;
+	}
 
-	vcpu_cache_mmio_info(vcpu, gva, gfn, access);
-	return 1;
+	return -EFAULT;
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
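For context on the hwpoison branch that survives this simplification: kvm_send_hwpoison_signal() delivers SIGBUS with si_code BUS_MCEERR_AR to the faulting task, so the VMM gets a chance to recover instead of the guest silently consuming a poisoned page. Below is a minimal userspace sketch of catching that signal, using only the standard sigaction()/siginfo_t API; the handler body is hypothetical and merely reports the fault, it is not QEMU's actual recovery logic.

#define _GNU_SOURCE	/* for BUS_MCEERR_AR */
#include <signal.h>
#include <unistd.h>

/* Hypothetical SIGBUS handler: a real VMM would map si_addr back to a
 * guest frame and inject a machine check; this sketch only reports it.
 * write() is used because it is async-signal-safe. */
static void sigbus_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig;
	(void)ctx;
	if (info->si_code == BUS_MCEERR_AR) {
		static const char msg[] = "hwpoison: access error on guest page\n";
		write(STDERR_FILENO, msg, sizeof(msg) - 1);
	}
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = sigbus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);
	pause();	/* a poisoned access would now land in the handler */
	return 0;
}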
@@ -2277,6 +2274,33 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	}
 }
 
+static bool mmu_invalid_pfn(pfn_t pfn)
+{
+	return unlikely(is_invalid_pfn(pfn) || is_noslot_pfn(pfn));
+}
+
+static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+				pfn_t pfn, unsigned access, int *ret_val)
+{
+	bool ret = true;
+
+	/* The pfn is invalid, report the error! */
+	if (unlikely(is_invalid_pfn(pfn))) {
+		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
+		goto exit;
+	}
+
+	if (unlikely(is_noslot_pfn(pfn))) {
+		vcpu_cache_mmio_info(vcpu, gva, gfn, access);
+		*ret_val = 1;
+		goto exit;
+	}
+
+	ret = false;
+exit:
+	return ret;
+}
+
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
 
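The calling convention of the new helper is the point of the refactoring: handle_abnormal_pfn() returns true only when it has fully consumed the fault, having written the caller's final return code through ret_val, either what kvm_handle_bad_page() produced for an invalid pfn, or 1 for a no-slot pfn so the fault falls through to MMIO emulation with the (gva, gfn, access) triple cached first. (mmu_invalid_pfn() is unused in this hunk; presumably the guest-paging path in paging_tmpl.h picks it up elsewhere in the patch.) Here is a standalone userspace model of that convention; PFN_INVALID, PFN_NOSLOT, and the function names are stand-ins, not the kernel's encodings.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pfn_t;

#define PFN_INVALID ((pfn_t)-1)	/* stand-in for is_invalid_pfn() */
#define PFN_NOSLOT  ((pfn_t)-2)	/* stand-in for is_noslot_pfn() */

/* Returns true only when the fault is fully handled; in that case the
 * caller's return code has been stored through ret_val. */
static bool handle_abnormal(pfn_t pfn, int *ret_val)
{
	if (pfn == PFN_INVALID) {
		*ret_val = -EFAULT;	/* bad page: report the error */
		return true;
	}
	if (pfn == PFN_NOSLOT) {
		*ret_val = 1;		/* no memslot: emulate as MMIO */
		return true;
	}
	return false;			/* normal pfn: keep mapping it */
}

static int fault_path(pfn_t pfn)
{
	int r;

	/* mirrors the call sites in nonpaging_map()/tdp_page_fault() */
	if (handle_abnormal(pfn, &r))
		return r;
	/* ... normal shadow-page mapping work would happen here ... */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", fault_path(42),
	       fault_path(PFN_NOSLOT), fault_path(PFN_INVALID));
	return 0;
}

Compiled and run, this prints "0 1 -14": a normal pfn proceeds, a no-slot pfn asks for emulation, and an invalid pfn reports -EFAULT.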
@@ -2311,9 +2335,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2685,9 +2708,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
+		return r;
+
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
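One call-site detail worth noting: tdp_page_fault() now passes ACC_ALL where the removed kvm_handle_bad_page() call passed a zero access, while the gva argument stays 0, since with two-dimensional paging no guest linear address is available at fault time. Presumably this is also why an MMIO entry cached from this path is treated as valid for any access type, matching what nonpaging_map() already did.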