@@ -56,7 +56,6 @@ void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
 
 static void async_pf_execute(struct work_struct *work)
 {
-	struct page *page = NULL;
 	struct kvm_async_pf *apf =
 		container_of(work, struct kvm_async_pf, work);
 	struct mm_struct *mm = apf->mm;
@@ -68,13 +67,12 @@ static void async_pf_execute(struct work_struct *work)
 
 	use_mm(mm);
 	down_read(&mm->mmap_sem);
-	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+	get_user_pages(current, mm, addr, 1, 1, 0, NULL, NULL);
 	up_read(&mm->mmap_sem);
 	unuse_mm(mm);
 
 	spin_lock(&vcpu->async_pf.lock);
 	list_add_tail(&apf->link, &vcpu->async_pf.done);
-	apf->page = page;
 	spin_unlock(&vcpu->async_pf.lock);
 
 	/*
@@ -82,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
 	 * this point
 	 */
 
-	trace_kvm_async_pf_completed(addr, page, gva);
+	trace_kvm_async_pf_completed(addr, gva);
 
 	if (waitqueue_active(&vcpu->wq))
 		wake_up_interruptible(&vcpu->wq);
@@ -112,8 +110,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
 			list_entry(vcpu->async_pf.done.next,
 				   typeof(*work), link);
 		list_del(&work->link);
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 	spin_unlock(&vcpu->async_pf.lock);
@@ -133,14 +129,11 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
 		list_del(&work->link);
 		spin_unlock(&vcpu->async_pf.lock);
 
-		if (work->page)
-			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_ready(vcpu, work);
 		kvm_arch_async_page_present(vcpu, work);
 
 		list_del(&work->queue);
 		vcpu->async_pf.queued--;
-		if (!is_error_page(work->page))
-			kvm_release_page_clean(work->page);
 		kmem_cache_free(async_pf_cache, work);
 	}
 }
@@ -163,7 +156,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
 	if (!work)
 		return 0;
 
-	work->page = NULL;
+	work->wakeup_all = false;
 	work->vcpu = vcpu;
 	work->gva = gva;
 	work->addr = gfn_to_hva(vcpu->kvm, gfn);
@@ -203,7 +196,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 	if (!work)
 		return -ENOMEM;
 
-	work->page = KVM_ERR_PTR_BAD_PAGE;
+	work->wakeup_all = true;
 	INIT_LIST_HEAD(&work->queue);	/* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);
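The hunks above drop the struct page plumbing from the async page-fault path: get_user_pages() is now called with a NULL pages array, so the worker merely faults the page into the mm and never takes a reference that would later need kvm_release_page_clean(), and the "wake up all waiters" case is signalled by an explicit wakeup_all flag rather than by storing the KVM_ERR_PTR_BAD_PAGE sentinel in work->page. Below is a minimal stand-alone sketch of that flag change; fake_async_pf and page_present are illustrative names, not kernel symbols.

/*
 * Stand-alone illustration (not kernel code); names are hypothetical.
 * The completion record carries an explicit wakeup_all boolean instead
 * of encoding the broadcast-wakeup case as a bad-page sentinel.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_async_pf {
	bool wakeup_all;	/* replaces work->page == KVM_ERR_PTR_BAD_PAGE */
	unsigned long gva;	/* guest virtual address that faulted */
};

/* Consumer side: key off the flag, not a sentinel page pointer. */
static void page_present(const struct fake_async_pf *work)
{
	if (work->wakeup_all)
		printf("broadcast wakeup: release every waiter\n");
	else
		printf("single completion for gva %#lx\n", work->gva);
}

int main(void)
{
	struct fake_async_pf single = { .wakeup_all = false, .gva = 0x1000 };
	struct fake_async_pf broadcast = { .wakeup_all = true };

	page_present(&single);		/* kvm_setup_async_pf() path */
	page_present(&broadcast);	/* kvm_async_pf_wakeup_all() path */
	return 0;
}

A boolean also removes the double duty the old encoding gave work->page, which used NULL for a normal pending fault and the bad-page sentinel for a broadcast wakeup, as seen in the deleted "if (work->page)" and "is_error_page(work->page)" tests above.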