@@ -27,6 +27,7 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/swap.h>
+#include <linux/hugetlb.h>
 
 #include <asm/page.h>
 #include <asm/cmpxchg.h>
@@ -211,6 +212,11 @@ static int is_shadow_present_pte(u64 pte)
 		&& pte != shadow_notrap_nonpresent_pte;
 }
 
+static int is_large_pte(u64 pte)
+{
+	return pte & PT_PAGE_SIZE_MASK;
+}
+
 static int is_writeble_pte(unsigned long pte)
 {
 	return pte & PT_WRITABLE_MASK;
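
For reference, PT_PAGE_SIZE_MASK is the x86 PS bit (bit 7 of a page-table entry); at PMD level a set PS bit means the entry maps 2MB directly instead of pointing at a 4kB page table. A minimal standalone sketch of the predicate, with the mask written out and illustrative spte values:

#include <stdint.h>
#include <stdio.h>

#define PT_PAGE_SIZE_MASK (1ULL << 7)	/* x86 PS bit */

static int is_large_pte(uint64_t pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

int main(void)
{
	uint64_t small = 0x12345001ULL;				/* 4kB mapping */
	uint64_t large = 0x40000001ULL | PT_PAGE_SIZE_MASK;	/* 2MB mapping */

	printf("small large? %d\n", !!is_large_pte(small));	/* prints 0 */
	printf("large large? %d\n", !!is_large_pte(large));	/* prints 1 */
	return 0;
}
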
@@ -349,17 +355,101 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 	kfree(rd);
 }
 
+/*
+ * Return the pointer to the largepage write count for a given
+ * gfn, handling slots that are not large page aligned.
+ */
+static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
+{
+	unsigned long idx;
+
+	idx = (gfn / KVM_PAGES_PER_HPAGE) -
+	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+	return &slot->lpage_info[idx].write_count;
+}
+
+static void account_shadowed(struct kvm *kvm, gfn_t gfn)
+{
+	int *write_count;
+
+	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+	*write_count += 1;
+	WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
+}
+
+static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
+{
+	int *write_count;
+
+	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+	*write_count -= 1;
+	WARN_ON(*write_count < 0);
+}
+
+static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+	int *largepage_idx;
+
+	if (slot) {
+		largepage_idx = slot_largepage_idx(gfn, slot);
+		return *largepage_idx;
+	}
+
+	return 1;
+}
+
+static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
+{
+	struct vm_area_struct *vma;
+	unsigned long addr;
+
+	addr = gfn_to_hva(kvm, gfn);
+	if (kvm_is_error_hva(addr))
+		return 0;
+
+	vma = find_vma(current->mm, addr);
+	if (vma && is_vm_hugetlb_page(vma))
+		return 1;
+
+	return 0;
+}
+
+static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+{
+	struct kvm_memory_slot *slot;
+
+	if (has_wrprotected_page(vcpu->kvm, large_gfn))
+		return 0;
+
+	if (!host_largepage_backed(vcpu->kvm, large_gfn))
+		return 0;
+
+	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
+	if (slot && slot->dirty_bitmap)
+		return 0;
+
+	return 1;
+}
+
 /*
  * Take gfn and return the reverse mapping to it.
  * Note: gfn must be unaliased before this function gets called
  */
 
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
 {
 	struct kvm_memory_slot *slot;
+	unsigned long idx;
 
 	slot = gfn_to_memslot(kvm, gfn);
-	return &slot->rmap[gfn - slot->base_gfn];
+	if (!lpage)
+		return &slot->rmap[gfn - slot->base_gfn];
+
+	idx = (gfn / KVM_PAGES_PER_HPAGE) -
+	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+
+	return &slot->lpage_info[idx].rmap_pde;
 }
 
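
The index arithmetic in slot_largepage_idx() and gfn_to_rmap() is what makes unaligned slots work: gfn and slot->base_gfn are each rounded down to huge-page frame numbers before subtracting, so a slot that begins in the middle of a 2MB region still maps every gfn it contains to a valid lpage_info entry. A standalone sketch of the same computation (KVM_PAGES_PER_HPAGE is 512 for 2MB huge pages over 4kB base pages; the base_gfn value is made up for illustration):

#include <assert.h>
#include <stdio.h>

#define KVM_PAGES_PER_HPAGE 512	/* 2MB / 4kB */

/* Hypothetical slot starting mid-huge-page, as unaligned slots do. */
static const unsigned long base_gfn = 0x201;	/* not 512-aligned */

static unsigned long largepage_idx(unsigned long gfn)
{
	/* Same rounding as slot_largepage_idx(): both sides are
	 * truncated to huge-page frame numbers before subtracting. */
	return (gfn / KVM_PAGES_PER_HPAGE) - (base_gfn / KVM_PAGES_PER_HPAGE);
}

int main(void)
{
	assert(largepage_idx(0x201) == 0);	/* first page of the slot */
	assert(largepage_idx(0x3ff) == 0);	/* same 2MB region */
	assert(largepage_idx(0x400) == 1);	/* next 2MB region */
	printf("idx(0x201)=%lu idx(0x400)=%lu\n",
	       largepage_idx(0x201), largepage_idx(0x400));
	return 0;
}
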
 /*
@@ -371,7 +461,7 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
  * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
  * containing more mappings.
  */
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 {
 	struct kvm_mmu_page *sp;
 	struct kvm_rmap_desc *desc;
@@ -383,7 +473,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
 	sp->gfns[spte - sp->spt] = gfn;
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
 	if (!*rmapp) {
 		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
 		*rmapp = (unsigned long)spte;
@@ -449,7 +539,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		kvm_release_page_dirty(page);
 	else
 		kvm_release_page_clean(page);
-	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
+	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 		BUG();
@@ -515,7 +605,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 	int write_protected = 0;
 
 	gfn = unalias_gfn(kvm, gfn);
-	rmapp = gfn_to_rmap(kvm, gfn);
+	rmapp = gfn_to_rmap(kvm, gfn, 0);
 
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
@@ -528,8 +618,27 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 		}
 		spte = rmap_next(kvm, rmapp, spte);
 	}
+	/* check for huge page mappings */
+	rmapp = gfn_to_rmap(kvm, gfn, 1);
+	spte = rmap_next(kvm, rmapp, NULL);
+	while (spte) {
+		BUG_ON(!spte);
+		BUG_ON(!(*spte & PT_PRESENT_MASK));
+		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
+		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
+		if (is_writeble_pte(*spte)) {
+			rmap_remove(kvm, spte);
+			--kvm->stat.lpages;
+			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+			write_protected = 1;
+		}
+		spte = rmap_next(kvm, rmapp, spte);
+	}
+
 	if (write_protected)
 		kvm_flush_remote_tlbs(kvm);
+
+	account_shadowed(kvm, gfn);
 }
 
 #ifdef MMU_DEBUG
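
Note the asymmetry with the 4kB loop earlier in the function: small sptes merely lose PT_WRITABLE_MASK, while writable large sptes are torn down completely, since account_shadowed() marks the region so the next fault rebuilds it from write-protectable 4kB mappings. A standalone sketch of that decision, with illustrative mask and spte values:

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK   (1ULL << 0)
#define PT_WRITABLE_MASK  (1ULL << 1)
#define PT_PAGE_SIZE_MASK (1ULL << 7)	/* PS bit: 2MB mapping */

/* Small pages lose only the W bit; large pages are dropped so the
 * next fault can rebuild the region from 4kB sptes. */
static uint64_t write_protect(uint64_t spte)
{
	if (!(spte & PT_WRITABLE_MASK))
		return spte;			/* already read-only */
	if (spte & PT_PAGE_SIZE_MASK)
		return 0;			/* zap: refault as 4kB */
	return spte & ~PT_WRITABLE_MASK;	/* drop write access only */
}

int main(void)
{
	uint64_t small = 0x1000ULL | PT_PRESENT_MASK | PT_WRITABLE_MASK;
	uint64_t large = 0x200000ULL | PT_PRESENT_MASK | PT_WRITABLE_MASK
			 | PT_PAGE_SIZE_MASK;

	printf("small %llx -> %llx\n", (unsigned long long)small,
	       (unsigned long long)write_protect(small));
	printf("large %llx -> %llx\n", (unsigned long long)large,
	       (unsigned long long)write_protect(large));
	return 0;
}
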
@@ -747,11 +856,17 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 		ent = pt[i];
 
+		if (is_shadow_present_pte(ent)) {
+			if (!is_large_pte(ent)) {
+				ent &= PT64_BASE_ADDR_MASK;
+				mmu_page_remove_parent_pte(page_header(ent),
+							   &pt[i]);
+			} else {
+				--kvm->stat.lpages;
+				rmap_remove(kvm, &pt[i]);
+			}
+		}
 		pt[i] = shadow_trap_nonpresent_pte;
-		if (!is_shadow_present_pte(ent))
-			continue;
-		ent &= PT64_BASE_ADDR_MASK;
-		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
 	}
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -791,6 +906,8 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	}
 	kvm_mmu_page_unlink_children(kvm, sp);
 	if (!sp->root_count) {
+		if (!sp->role.metaphysical)
+			unaccount_shadowed(kvm, sp->gfn);
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
 	} else {
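
account_shadowed() and unaccount_shadowed() are a strict pair: shadowing a guest page table bumps the write_count of its 2MB region, and zapping the shadow page drops it, so has_wrprotected_page() reduces to "is any gfn in this region currently shadowed?". A toy single-region model of that invariant (hypothetical names, no locking):

#include <assert.h>

/* Toy single-region model of the write_count bookkeeping. */
static int write_count;

static void account_one(void)   { ++write_count; }
static void unaccount_one(void) { --write_count; assert(write_count >= 0); }
static int  region_has_wrprotected_page(void) { return write_count; }

int main(void)
{
	account_one();		/* a guest page table in the region is shadowed */
	assert(region_has_wrprotected_page());	/* no 2MB mapping allowed */
	unaccount_one();	/* the shadow page is zapped */
	assert(!region_has_wrprotected_page());	/* large mapping OK again */
	return 0;
}
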
@@ -894,7 +1011,8 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
-			 int *ptwrite, gfn_t gfn, struct page *page)
+			 int *ptwrite, int largepage, gfn_t gfn,
+			 struct page *page)
 {
 	u64 spte;
 	int was_rmapped = 0;
@@ -907,15 +1025,29 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		 write_fault, user_fault, gfn);
 
 	if (is_rmap_pte(*shadow_pte)) {
-		if (host_pfn != page_to_pfn(page)) {
+		/*
+		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
+		 * the parent of the now unreachable PTE.
+		 */
+		if (largepage && !is_large_pte(*shadow_pte)) {
+			struct kvm_mmu_page *child;
+			u64 pte = *shadow_pte;
+
+			child = page_header(pte & PT64_BASE_ADDR_MASK);
+			mmu_page_remove_parent_pte(child, shadow_pte);
+		} else if (host_pfn != page_to_pfn(page)) {
 			pgprintk("hfn old %lx new %lx\n",
 				 host_pfn, page_to_pfn(page));
 			rmap_remove(vcpu->kvm, shadow_pte);
+		} else {
+			if (largepage)
+				was_rmapped = is_large_pte(*shadow_pte);
+			else
+				was_rmapped = 1;
 		}
-		else
-			was_rmapped = 1;
 	}
 
+
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
 	 * whether the guest actually used the pte (in order to detect
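
The new first branch covers overwriting a PMD-level spte that pointed at a page of 512 lower-level sptes with a direct 2MB mapping: the old value referenced a shadow page table, not guest data, so the child must be unlinked from its parent rather than rmap_remove()d. A sketch of the two spte shapes being distinguished (mask values per the x86 layout, addresses illustrative):

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK     (1ULL << 0)
#define PT_PAGE_SIZE_MASK   (1ULL << 7)
#define PT64_BASE_ADDR_MASK 0x000ffffffffff000ULL

int main(void)
{
	/* PMD-level spte pointing at a shadow page of 512 sptes. */
	uint64_t table_ptr = 0x12345000ULL | PT_PRESENT_MASK;
	/* PMD-level spte mapping 2MB of guest memory directly. */
	uint64_t large_map = 0x40000000ULL | PT_PRESENT_MASK
			     | PT_PAGE_SIZE_MASK;

	/* At the same level only the PS bit tells them apart; replacing
	 * table_ptr with large_map would orphan the child table unless
	 * it is first unlinked from its parent, as the branch above does. */
	printf("table_ptr: large=%d child=%llx\n",
	       !!(table_ptr & PT_PAGE_SIZE_MASK),
	       (unsigned long long)(table_ptr & PT64_BASE_ADDR_MASK));
	printf("large_map: large=%d\n", !!(large_map & PT_PAGE_SIZE_MASK));
	return 0;
}
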
@@ -930,6 +1062,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	spte |= PT_PRESENT_MASK;
 	if (pte_access & ACC_USER_MASK)
 		spte |= PT_USER_MASK;
+	if (largepage)
+		spte |= PT_PAGE_SIZE_MASK;
 
 	spte |= page_to_phys(page);
 
|
@@ -944,7 +1078,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
|
|
|
}
|
|
|
|
|
|
shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
|
|
|
- if (shadow) {
|
|
|
+ if (shadow ||
|
|
|
+ (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
|
|
|
pgprintk("%s: found shadow page for %lx, marking ro\n",
|
|
|
__FUNCTION__, gfn);
|
|
|
pte_access &= ~ACC_WRITE_MASK;
|
|
@@ -963,10 +1098,17 @@ unshadowed:
 		mark_page_dirty(vcpu->kvm, gfn);
 
 	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+	pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
+		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
+		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
 	set_shadow_pte(shadow_pte, spte);
+	if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
+	    && (spte & PT_PRESENT_MASK))
+		++vcpu->kvm->stat.lpages;
+
 	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
 	if (!was_rmapped) {
-		rmap_add(vcpu, shadow_pte, gfn);
+		rmap_add(vcpu, shadow_pte, gfn, largepage);
 		if (!is_rmap_pte(*shadow_pte))
 			kvm_release_page_clean(page);
 	} else {
@@ -984,7 +1126,8 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			gfn_t gfn, struct page *page, int level)
+			int largepage, gfn_t gfn, struct page *page,
+			int level)
 {
 	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
 	int pt_write = 0;
@@ -998,7 +1141,13 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 
 		if (level == 1) {
 			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-				     0, write, 1, &pt_write, gfn, page);
+				     0, write, 1, &pt_write, 0, gfn, page);
+			return pt_write;
+		}
+
+		if (largepage && level == 2) {
+			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
+				     0, write, 1, &pt_write, 1, gfn, page);
 			return pt_write;
 		}
 
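
__direct_map() walks from the root toward level 1; terminating at level 2 with the PS bit set lets one PMD entry stand in for a full page of 512 level-1 sptes. The coverage arithmetic, as a standalone sketch (4kB base pages, 9 index bits per level):

#include <stdio.h>

#define PAGE_SHIFT 12		/* 4kB base pages */
#define PT64_LEVEL_BITS 9	/* 512 entries per table */

/* Bytes mapped by a single entry at a given level. */
static unsigned long long entry_coverage(int level)
{
	return 1ULL << (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS);
}

int main(void)
{
	printf("level 1 entry: %llu kB\n", entry_coverage(1) >> 10);	/* 4 */
	printf("level 2 entry: %llu MB\n", entry_coverage(2) >> 20);	/* 2 */
	printf("level 3 entry: %llu GB\n", entry_coverage(3) >> 30);	/* 1 */
	return 0;
}
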
@@ -1027,12 +1176,18 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
 	int r;
+	int largepage = 0;
 
 	struct page *page;
 
 	down_read(&vcpu->kvm->slots_lock);
 
 	down_read(&current->mm->mmap_sem);
+	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
+		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+		largepage = 1;
+	}
+
 	page = gfn_to_page(vcpu->kvm, gfn);
 	up_read(&current->mm->mmap_sem);
 
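
gfn & ~(KVM_PAGES_PER_HPAGE-1) rounds the faulting gfn down to the first page of its 2MB region, so the backing check and gfn_to_page() both operate on the head page of the huge page. The masking, worked through with an arbitrary sample gfn:

#include <assert.h>

#define KVM_PAGES_PER_HPAGE 512UL	/* 2MB / 4kB */

int main(void)
{
	unsigned long gfn = 0x12345;	/* arbitrary gfn inside a region */

	/* ~(512-1) clears the low 9 bits: 0x12345 -> 0x12200. */
	unsigned long large_gfn = gfn & ~(KVM_PAGES_PER_HPAGE - 1);

	assert(large_gfn == 0x12200);
	assert(large_gfn % KVM_PAGES_PER_HPAGE == 0);
	assert(gfn - large_gfn < KVM_PAGES_PER_HPAGE);
	return 0;
}
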
@@ -1045,7 +1200,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, v, write, gfn, page, PT32E_ROOT_LEVEL);
+	r = __direct_map(vcpu, v, write, largepage, gfn, page,
+			 PT32E_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	up_read(&vcpu->kvm->slots_lock);
@@ -1180,6 +1336,8 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 {
 	struct page *page;
 	int r;
+	int largepage = 0;
+	gfn_t gfn = gpa >> PAGE_SHIFT;
 
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1189,7 +1347,11 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 		return r;
 
 	down_read(&current->mm->mmap_sem);
-	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
+		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+		largepage = 1;
+	}
+	page = gfn_to_page(vcpu->kvm, gfn);
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
 		up_read(&current->mm->mmap_sem);
@@ -1198,7 +1360,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-			 gpa >> PAGE_SHIFT, page, TDP_ROOT_LEVEL);
+			 largepage, gfn, page, TDP_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	up_read(&current->mm->mmap_sem);
 
@@ -1397,7 +1559,8 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 
 	pte = *spte;
 	if (is_shadow_present_pte(pte)) {
-		if (sp->role.level == PT_PAGE_TABLE_LEVEL)
+		if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
+		    is_large_pte(pte))
 			rmap_remove(vcpu->kvm, spte);
 		else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
@@ -1405,6 +1568,8 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 		}
 	}
 	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+	if (is_large_pte(pte))
+		--vcpu->kvm->stat.lpages;
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
@@ -1412,7 +1577,8 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 				  u64 *spte,
 				  const void *new)
 {
-	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+	if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
+	    && !vcpu->arch.update_pte.largepage) {
 		++vcpu->kvm->stat.mmu_pde_zapped;
 		return;
 	}
@@ -1460,6 +1626,8 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	u64 gpte = 0;
 	struct page *page;
 
+	vcpu->arch.update_pte.largepage = 0;
+
 	if (bytes != 4 && bytes != 8)
 		return;
 
|
@@ -1487,9 +1655,13 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
|
|
|
return;
|
|
|
gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
|
|
|
|
|
|
- down_read(&vcpu->kvm->slots_lock);
|
|
|
+ down_read(¤t->mm->mmap_sem);
|
|
|
+ if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
|
|
|
+ gfn &= ~(KVM_PAGES_PER_HPAGE-1);
|
|
|
+ vcpu->arch.update_pte.largepage = 1;
|
|
|
+ }
|
|
|
page = gfn_to_page(vcpu->kvm, gfn);
|
|
|
- up_read(&vcpu->kvm->slots_lock);
|
|
|
+ up_read(¤t->mm->mmap_sem);
|
|
|
|
|
|
if (is_error_page(page)) {
|
|
|
kvm_release_page_clean(page);
|
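
The lock swap in this last hunk follows from the new call: is_largepage_backed() ends up in find_vma(), which must run under current->mm->mmap_sem rather than the KVM slots_lock. A userspace analogue of the required discipline, using a pthread rwlock as a stand-in for mmap_sem and a hypothetical VMA predicate:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for current->mm->mmap_sem (a reader/writer semaphore). */
static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical stand-in for the find_vma()-based check: in the real
 * code this walks the VMA tree, which is only safe under mmap_sem. */
static int vma_is_hugetlb(unsigned long addr)
{
	return (addr & 0x1fffff) == 0;	/* pretend 2MB-aligned VMAs are huge */
}

int main(void)
{
	unsigned long addr = 0x40000000;
	int huge;

	pthread_rwlock_rdlock(&mmap_sem);	/* down_read(&mmap_sem) */
	huge = vma_is_hugetlb(addr);		/* VMA lookup under the lock */
	pthread_rwlock_unlock(&mmap_sem);	/* up_read(&mmap_sem) */

	printf("addr %#lx hugetlb-backed: %d\n", addr, huge);
	return 0;
}
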