@@ -489,16 +489,20 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
  *
  * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
  * containing more mappings.
+ *
+ * Returns the number of rmap entries before the spte was added or zero if
+ * the spte was not added.
+ *
  */
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
+static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 {
         struct kvm_mmu_page *sp;
         struct kvm_rmap_desc *desc;
         unsigned long *rmapp;
-        int i;
+        int i, count = 0;
 
         if (!is_rmap_pte(*spte))
-                return;
+                return count;
         gfn = unalias_gfn(vcpu->kvm, gfn);
         sp = page_header(__pa(spte));
         sp->gfns[spte - sp->spt] = gfn;
@@ -515,8 +519,10 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
         } else {
                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
+                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) {
                         desc = desc->more;
+                        count += RMAP_EXT;
+                }
                 if (desc->shadow_ptes[RMAP_EXT-1]) {
                         desc->more = mmu_alloc_rmap_desc(vcpu);
                         desc = desc->more;
@@ -525,6 +531,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
                         ;
                 desc->shadow_ptes[i] = spte;
         }
+        return count;
 }
 
 static void rmap_desc_remove_entry(unsigned long *rmapp,
@@ -754,6 +761,19 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
         return young;
 }
 
+#define RMAP_RECYCLE_THRESHOLD 1000
+
+static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
+{
+        unsigned long *rmapp;
+
+        gfn = unalias_gfn(vcpu->kvm, gfn);
+        rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
+
+        kvm_unmap_rmapp(vcpu->kvm, rmapp);
+        kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 {
         return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
@@ -1741,6 +1761,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
         int was_rmapped = 0;
         int was_writeble = is_writeble_pte(*shadow_pte);
+        int rmap_count;
 
         pgprintk("%s: spte %llx access %x write_fault %d"
                  " user_fault %d gfn %lx\n",
@@ -1782,9 +1803,11 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
         page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
         if (!was_rmapped) {
-                rmap_add(vcpu, shadow_pte, gfn, largepage);
+                rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
                 if (!is_rmap_pte(*shadow_pte))
                         kvm_release_pfn_clean(pfn);
+                if (rmap_count > RMAP_RECYCLE_THRESHOLD)
+                        rmap_recycle(vcpu, gfn, largepage);
         } else {
                 if (was_writeble)
                         kvm_release_pfn_dirty(pfn);
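
For reference, a minimal user-space model of the accounting this patch introduces: rmap_add() now reports how many reverse-mapping entries (counted in whole RMAP_EXT-sized desc blocks) already hang off the gfn, and mmu_set_spte() recycles the whole chain once that count passes RMAP_RECYCLE_THRESHOLD, unmapping the sptes and flushing remote TLBs so rmap chains stay bounded. The sketch below is illustrative only: struct rmap_desc, the RMAP_EXT value of 4, the threshold of 16, and the free-everything rmap_recycle() are simplified stand-ins for the kernel structures, and the single-mapping fast path (rmapp bit zero clear) is folded into the desc chain.

/*
 * Simplified model of the rmap chain-length accounting above.
 * Not kernel code; error handling is omitted for brevity.
 */
#include <stdio.h>
#include <stdlib.h>

#define RMAP_EXT 4                  /* sptes per desc, as in the kernel */
#define RMAP_RECYCLE_THRESHOLD 16   /* kernel value is 1000 */

struct rmap_desc {
        void *sptes[RMAP_EXT];
        struct rmap_desc *more;
};

/* Append spte; return roughly how many entries the chain held before. */
static int rmap_add(struct rmap_desc **head, void *spte)
{
        struct rmap_desc *desc;
        int i, count = 0;

        if (!*head) {
                *head = calloc(1, sizeof(**head));
                (*head)->sptes[0] = spte;
                return count;
        }
        desc = *head;
        while (desc->sptes[RMAP_EXT - 1] && desc->more) {
                desc = desc->more;              /* walk past full descs ... */
                count += RMAP_EXT;              /* ... counting them in blocks */
        }
        if (desc->sptes[RMAP_EXT - 1]) {        /* last desc full: grow chain */
                desc->more = calloc(1, sizeof(*desc));
                desc = desc->more;
        }
        for (i = 0; desc->sptes[i]; ++i)
                ;
        desc->sptes[i] = spte;
        return count;
}

/* Stand-in for rmap_recycle(): throw the whole chain away. */
static void rmap_recycle(struct rmap_desc **head)
{
        while (*head) {
                struct rmap_desc *next = (*head)->more;

                free(*head);
                *head = next;
        }
}

int main(void)
{
        struct rmap_desc *chain = NULL;
        long n;

        for (n = 1; n <= 40; n++) {
                int count = rmap_add(&chain, (void *)n);

                if (count > RMAP_RECYCLE_THRESHOLD) {
                        printf("count %d > %d after entry %ld, recycling\n",
                               count, RMAP_RECYCLE_THRESHOLD, n);
                        rmap_recycle(&chain);
                }
        }
        rmap_recycle(&chain);
        return 0;
}

As in the patch, the count is an approximation (only fully traversed descs are tallied), which is cheap and sufficient for deciding when a chain has grown long enough to be worth tearing down.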