@@ -2028,20 +2028,6 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
-static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
-{
-	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
-	LIST_HEAD(invalid_list);
-
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
-		pgprintk("%s: zap %llx %x\n",
-			 __func__, gfn, sp->role.word);
-		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-	}
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
-}
-
 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 {
 	int slot = memslot_id(kvm, gfn);
@@ -4004,127 +3990,6 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 	return nr_mmu_pages;
 }
 
-static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
-				unsigned len)
-{
-	if (len > buffer->len)
-		return NULL;
-	return buffer->ptr;
-}
-
-static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
-				unsigned len)
-{
-	void *ret;
-
-	ret = pv_mmu_peek_buffer(buffer, len);
-	if (!ret)
-		return ret;
-	buffer->ptr += len;
-	buffer->len -= len;
-	buffer->processed += len;
-	return ret;
-}
-
-static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
-			    gpa_t addr, gpa_t value)
-{
-	int bytes = 8;
-	int r;
-
-	if (!is_long_mode(vcpu) && !is_pae(vcpu))
-		bytes = 4;
-
-	r = mmu_topup_memory_caches(vcpu);
-	if (r)
-		return r;
-
-	if (!emulator_write_phys(vcpu, addr, &value, bytes))
-		return -EFAULT;
-
-	return 1;
-}
-
-static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
-{
-	(void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
-	return 1;
-}
-
-static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
-{
-	spin_lock(&vcpu->kvm->mmu_lock);
-	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
-	spin_unlock(&vcpu->kvm->mmu_lock);
-	return 1;
-}
-
-static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
-			     struct kvm_pv_mmu_op_buffer *buffer)
-{
-	struct kvm_mmu_op_header *header;
-
-	header = pv_mmu_peek_buffer(buffer, sizeof *header);
-	if (!header)
-		return 0;
-	switch (header->op) {
-	case KVM_MMU_OP_WRITE_PTE: {
-		struct kvm_mmu_op_write_pte *wpte;
-
-		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
-		if (!wpte)
-			return 0;
-		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
-					wpte->pte_val);
-	}
-	case KVM_MMU_OP_FLUSH_TLB: {
-		struct kvm_mmu_op_flush_tlb *ftlb;
-
-		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
-		if (!ftlb)
-			return 0;
-		return kvm_pv_mmu_flush_tlb(vcpu);
-	}
-	case KVM_MMU_OP_RELEASE_PT: {
-		struct kvm_mmu_op_release_pt *rpt;
-
-		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
-		if (!rpt)
-			return 0;
-		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
-	}
-	default: return 0;
-	}
-}
-
-int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
-		  gpa_t addr, unsigned long *ret)
-{
-	int r;
-	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
-
-	buffer->ptr = buffer->buf;
-	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
-	buffer->processed = 0;
-
-	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
-	if (r)
-		goto out;
-
-	while (buffer->len) {
-		r = kvm_pv_mmu_op_one(vcpu, buffer);
-		if (r < 0)
-			goto out;
-		if (r == 0)
-			break;
-	}
-
-	r = 1;
-out:
-	*ret = buffer->processed;
-	return r;
-}
-
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
 {
 	struct kvm_shadow_walk_iterator iterator;