@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
+#include <linux/hash.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -155,6 +156,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 u64 __read_mostly host_xcr0;
 
+static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
+{
+	int i;
+	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
+		vcpu->arch.apf.gfns[i] = ~0;
+}
+
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
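
The gfns[] table filled with ~0 here is the per-vCPU open-addressed hash that the helpers at the bottom of this patch probe; ~0 is the free-slot sentinel throughout. For reference, a minimal sketch of the declarations this file relies on (they belong to the headers elsewhere in this series, so the names and exact layout are assumptions, not verbatim):

	/* Assumed declarations, sketched from context; see kvm_host.h in this series. */
	#define ASYNC_PF_PER_VCPU 64	/* per-vCPU async PF capacity (assumed value) */

	struct {
		bool halted;		/* synthetic halt flag, consumed in vcpu_enter_guest() below */
		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];	/* ~0 == free slot */
	} apf;	/* lives in struct kvm_vcpu_arch, hence vcpu->arch.apf */
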
@@ -5115,6 +5123,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			vcpu->fpu_active = 0;
 			kvm_x86_ops->fpu_deactivate(vcpu);
 		}
+		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+			/* Page is swapped out. Do synthetic halt */
+			vcpu->arch.apf.halted = true;
+			r = 1;
+			goto out;
+		}
 	}
 
 	r = kvm_mmu_reload(vcpu);
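
The request consumed here is raised by kvm_arch_async_page_not_present() at the end of this patch, when the page the guest faulted on is swapped out and an async page fault cannot be injected. The halt is synthetic in that mp_state is deliberately left untouched, so nothing guest-visible changes; only the host-side flag parks the vCPU. The pairing, in outline (stitched together from this patch, not new kernel API):

	/* producer, in the async PF "page not present" path below: */
	kvm_make_request(KVM_REQ_APF_HALT, vcpu);

	/* consumer, above: kvm_check_request() test-and-clears the bit on
	 * the way into the guest, then the vCPU is parked host-side only */
	if (kvm_check_request(KVM_REQ_APF_HALT, vcpu))
		vcpu->arch.apf.halted = true;
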
@@ -5243,7 +5257,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 
 	r = 1;
 	while (r > 0) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		    !vcpu->arch.apf.halted)
 			r = vcpu_enter_guest(vcpu);
 		else {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
@@ -5256,6 +5271,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 					vcpu->arch.mp_state =
 						KVM_MP_STATE_RUNNABLE;
 				case KVM_MP_STATE_RUNNABLE:
+					vcpu->arch.apf.halted = false;
 					break;
 				case KVM_MP_STATE_SIPI_RECEIVED:
 				default:
@@ -5277,6 +5293,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.request_irq_exits;
 		}
+
+		kvm_check_async_pf_completion(vcpu);
+
 		if (signal_pending(current)) {
 			r = -EINTR;
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
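
kvm_check_async_pf_completion() comes from the generic async PF code elsewhere in this series (virt/kvm/async_pf.c), so only its call site is visible here. A hedged sketch of what it is expected to do, where everything beyond vcpu->async_pf.done (which kvm_arch_vcpu_runnable() below also reads) is an assumption:

	/* Sketch only: drain completed async PF work and notify the arch code.
	 * The lock and the link member are assumed names, not verbatim. */
	void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
	{
		struct kvm_async_pf *work;

		while (!list_empty_careful(&vcpu->async_pf.done)) {
			spin_lock(&vcpu->async_pf.lock);
			work = list_first_entry(&vcpu->async_pf.done,
						typeof(*work), link);
			list_del(&work->link);
			spin_unlock(&vcpu->async_pf.lock);

			/* hands the gfn back to the arch hook added below,
			 * which drops it from the per-vCPU hash */
			kvm_arch_async_page_present(vcpu, work);
		}
	}

Calling it here, after the enter-guest/block decision, means completions are reaped once per __vcpu_run() iteration, including iterations where the vCPU sits in the synthetic halt.
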
@@ -5792,6 +5811,10 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
+	kvm_clear_async_pf_completion_queue(vcpu);
+	kvm_async_pf_hash_reset(vcpu);
+	vcpu->arch.apf.halted = false;
+
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -5880,6 +5903,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
 		goto fail_free_mce_banks;
 
+	kvm_async_pf_hash_reset(vcpu);
+
 	return 0;
 fail_free_mce_banks:
 	kfree(vcpu->arch.mce_banks);
@@ -5938,8 +5963,10 @@ static void kvm_free_vcpus(struct kvm *kvm)
 	/*
 	 * Unpin any mmu pages first.
 	 */
-	kvm_for_each_vcpu(i, vcpu, kvm)
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		kvm_clear_async_pf_completion_queue(vcpu);
 		kvm_unload_vcpu_mmu(vcpu);
+	}
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_arch_vcpu_free(vcpu);
 
@@ -6050,7 +6077,9 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted)
+		|| !list_empty_careful(&vcpu->async_pf.done)
 		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
 		|| vcpu->arch.nmi_pending ||
 		(kvm_arch_interrupt_allowed(vcpu) &&
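
A vCPU parked by the synthetic halt must still count as runnable once a completion is queued, otherwise nothing would wake it; that is what the new list_empty_careful() term provides. list_empty_careful() is the stock lockless emptiness test from include/linux/list.h:

	static inline int list_empty_careful(const struct list_head *head)
	{
		struct list_head *next = head->next;
		return (next == head) && (next == head->prev);
	}

A racy read is acceptable here: this predicate is only a wakeup hint, and the completion path re-checks the list under its own synchronization.
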
@@ -6109,6 +6138,83 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
+static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
+{
+	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
+}
+
+static inline u32 kvm_async_pf_next_probe(u32 key)
+{
+	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
+}
+
+static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u32 key = kvm_async_pf_hash_fn(gfn);
+
+	while (vcpu->arch.apf.gfns[key] != ~0)
+		key = kvm_async_pf_next_probe(key);
+
+	vcpu->arch.apf.gfns[key] = gfn;
+}
+
+static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	int i;
+	u32 key = kvm_async_pf_hash_fn(gfn);
+
+	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
+		     (vcpu->arch.apf.gfns[key] != gfn ||
+		      vcpu->arch.apf.gfns[key] == ~0); i++)
+		key = kvm_async_pf_next_probe(key);
+
+	return key;
+}
+
+bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
+}
+
+static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u32 i, j, k;
+
+	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
+	while (true) {
+		vcpu->arch.apf.gfns[i] = ~0;
+		do {
+			j = kvm_async_pf_next_probe(j);
+			if (vcpu->arch.apf.gfns[j] == ~0)
+				return;
+			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
+			/*
+			 * k lies cyclically in ]i,j]
+			 * |    i.k.j |
+			 * |....j i.k.| or  |.k..j i...|
+			 */
+		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
+		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
+		i = j;
+	}
+}
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work)
+{
+	trace_kvm_async_pf_not_present(work->gva);
+
+	kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+}
+
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work)
+{
+	trace_kvm_async_pf_ready(work->gva);
+	kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+}
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
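
Taken together, the helpers above form a fixed-size open-addressed hash: hash_32() picks a home slot, kvm_async_pf_next_probe() probes linearly with wrap-around, and kvm_del_async_pf_gfn() performs backward-shift deletion, walking forward from the hole at i and moving back the first entry whose home slot k does not lie cyclically in ]i, j]. Such an entry is still reachable from slot i, whereas one with k inside ]i, j] would be stranded if moved. The standalone userspace model below mirrors that logic so the deletion invariant can be compiled and exercised outside the kernel; TABLE_ORDER, the multiplicative hash constant, and the test gfns are illustrative stand-ins, not values from this patch:

	/* Standalone model of the table above; build with: cc -Wall demo.c */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TABLE_ORDER	6			/* like order_base_2(ASYNC_PF_PER_VCPU) for 64 */
	#define TABLE_SIZE	(1u << TABLE_ORDER)
	#define EMPTY		(~(uint64_t)0)		/* ~0 marks a free slot */

	static uint64_t gfns[TABLE_SIZE];

	static uint32_t hash_fn(uint64_t gfn)		/* stand-in for hash_32() */
	{
		return ((uint32_t)gfn * 2654435761u) >> (32 - TABLE_ORDER);
	}

	static uint32_t next_probe(uint32_t key)	/* wrap within the table */
	{
		return (key + 1) & (TABLE_SIZE - 1);
	}

	static void add_gfn(uint64_t gfn)		/* linear-probing insert */
	{
		uint32_t key = hash_fn(gfn);

		while (gfns[key] != EMPTY)
			key = next_probe(key);
		gfns[key] = gfn;
	}

	static uint32_t gfn_slot(uint64_t gfn)
	{
		uint32_t i, key = hash_fn(gfn);

		/* mirrors the loop above; since a real gfn never equals EMPTY,
		 * this reduces to "probe until found or the table is exhausted" */
		for (i = 0; i < TABLE_SIZE &&
		     (gfns[key] != gfn || gfns[key] == EMPTY); i++)
			key = next_probe(key);
		return key;
	}

	static void del_gfn(uint64_t gfn)		/* backward-shift deletion */
	{
		uint32_t i, j, k;

		i = j = gfn_slot(gfn);
		while (1) {
			gfns[i] = EMPTY;
			do {
				j = next_probe(j);
				if (gfns[j] == EMPTY)
					return;		/* chain ends: nothing left to shift */
				k = hash_fn(gfns[j]);
				/* skip entries whose home slot k lies cyclically in
				 * ]i, j]: moving those into i would strand them */
			} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
			gfns[i] = gfns[j];		/* safe to shift j back into the hole */
			i = j;
		}
	}

	int main(void)
	{
		uint32_t s;

		for (s = 0; s < TABLE_SIZE; s++)	/* like kvm_async_pf_hash_reset() */
			gfns[s] = EMPTY;

		add_gfn(0x1000);
		add_gfn(0x2000);
		add_gfn(0x3000);
		del_gfn(0x2000);			/* delete from the middle */

		assert(gfns[gfn_slot(0x1000)] == 0x1000);	/* survivors stay findable */
		assert(gfns[gfn_slot(0x3000)] == 0x3000);
		assert(gfns[gfn_slot(0x2000)] != 0x2000);	/* deleted entry is gone */
		puts("ok");
		return 0;
	}

Because deletion shifts entries back instead of leaving tombstones, every stored gfn remains reachable from its home slot through an unbroken run of occupied slots, which is exactly the property kvm_find_async_pf_gfn() depends on.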