@@ -379,15 +379,15 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
 				       int min)
 {
-	struct page *page;
+	void *page;
 
 	if (cache->nobjs >= min)
 		return 0;
 	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		page = alloc_page(GFP_KERNEL);
+		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
-		cache->objects[cache->nobjs++] = page_address(page);
+		cache->objects[cache->nobjs++] = page;
 	}
 	return 0;
 }
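Note: __get_free_page() returns the page's kernel virtual address as an
unsigned long, so the alloc_page() + page_address() pair collapses into a
single call and the cache can store the pointer directly. A minimal
standalone sketch of the equivalence, not part of the patch (both helper
names are made up):

#include <linux/gfp.h>
#include <linux/mm.h>

static void *via_struct_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	/* Two steps: allocate a struct page, then resolve its address. */
	return page ? page_address(page) : NULL;
}

static void *via_free_pages_api(void)
{
	/* One step: the return value is already the virtual address. */
	return (void *)__get_free_page(GFP_KERNEL);
}

Both helpers yield the same kind of pointer; the second form matches how
the memory-cache consumers actually use the objects, as virtual addresses.
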
@@ -1032,9 +1032,9 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	ASSERT(is_empty_shadow_page(sp->spt));
 	hlist_del(&sp->hash_link);
 	list_del(&sp->link);
-	__free_page(virt_to_page(sp->spt));
+	free_page((unsigned long)sp->spt);
 	if (!sp->role.direct)
-		__free_page(virt_to_page(sp->gfns));
+		free_page((unsigned long)sp->gfns);
 	kmem_cache_free(mmu_page_header_cache, sp);
 	kvm_mod_used_mmu_pages(kvm, -1);
 }
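Note: the free side keeps the same symmetry. free_page() takes the kernel
virtual address that __get_free_page() returned, so the struct page
round-trip in __free_page(virt_to_page(...)) becomes unnecessary. A
standalone sketch, not part of the patch (release_table_page is a made-up
name):

#include <linux/gfp.h>
#include <linux/mm.h>

static void release_table_page(void *va)
{
	/*
	 * free_page() wants the virtual address cast to unsigned long;
	 * the old __free_page(virt_to_page(va)) form frees the same page.
	 */
	free_page((unsigned long)va);
}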