@@ -90,17 +90,6 @@ static struct kvm_stats_debugfs_item {
 
 static struct dentry *debugfs_dir;
 
-#define CR0_RESERVED_BITS \
-	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
-			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
-			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
-#define CR4_RESERVED_BITS \
-	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
-			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
-			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
-			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
-
-#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
-
 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
 
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
@@ -348,214 +337,6 @@ static void inject_gp(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->inject_gp(vcpu, 0);
 }
 
-/*
- * Load the pae pdptrs.  Returns true if they are all valid.
- */
-static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
-{
-	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
-	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
-	int i;
-	int ret;
-	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
-
-	mutex_lock(&vcpu->kvm->lock);
-	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
-				  offset * sizeof(u64), sizeof(pdpte));
-	if (ret < 0) {
-		ret = 0;
-		goto out;
-	}
-	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
-		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
-			ret = 0;
-			goto out;
-		}
-	}
-	ret = 1;
-
-	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
-out:
-	mutex_unlock(&vcpu->kvm->lock);
-
-	return ret;
-}
-
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
-{
-	if (cr0 & CR0_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-		       cr0, vcpu->cr0);
-		inject_gp(vcpu);
-		return;
-	}
-
-	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
-		inject_gp(vcpu);
-		return;
-	}
-
-	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-		       "and a clear PE flag\n");
-		inject_gp(vcpu);
-		return;
-	}
-
-	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-#ifdef CONFIG_X86_64
-		if ((vcpu->shadow_efer & EFER_LME)) {
-			int cs_db, cs_l;
-
-			if (!is_pae(vcpu)) {
-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
-				       "in long mode while PAE is disabled\n");
-				inject_gp(vcpu);
-				return;
-			}
-			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-			if (cs_l) {
-				printk(KERN_DEBUG "set_cr0: #GP, start paging "
-				       "in long mode while CS.L == 1\n");
-				inject_gp(vcpu);
-				return;
-
-			}
-		} else
-#endif
-		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
-			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-			       "reserved bits\n");
-			inject_gp(vcpu);
-			return;
-		}
-
-	}
-
-	kvm_x86_ops->set_cr0(vcpu, cr0);
-	vcpu->cr0 = cr0;
-
-	mutex_lock(&vcpu->kvm->lock);
-	kvm_mmu_reset_context(vcpu);
-	mutex_unlock(&vcpu->kvm->lock);
-	return;
-}
-EXPORT_SYMBOL_GPL(set_cr0);
-
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
-{
-	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
-}
-EXPORT_SYMBOL_GPL(lmsw);
-
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
-{
-	if (cr4 & CR4_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
-		inject_gp(vcpu);
-		return;
-	}
-
-	if (is_long_mode(vcpu)) {
-		if (!(cr4 & X86_CR4_PAE)) {
-			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-			       "in long mode\n");
-			inject_gp(vcpu);
-			return;
-		}
-	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
-		   && !load_pdptrs(vcpu, vcpu->cr3)) {
-		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
-		inject_gp(vcpu);
-		return;
-	}
-
-	if (cr4 & X86_CR4_VMXE) {
-		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
-		inject_gp(vcpu);
-		return;
-	}
-	kvm_x86_ops->set_cr4(vcpu, cr4);
-	vcpu->cr4 = cr4;
-	mutex_lock(&vcpu->kvm->lock);
-	kvm_mmu_reset_context(vcpu);
-	mutex_unlock(&vcpu->kvm->lock);
-}
-EXPORT_SYMBOL_GPL(set_cr4);
-
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
-{
-	if (is_long_mode(vcpu)) {
-		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
-			inject_gp(vcpu);
-			return;
-		}
-	} else {
-		if (is_pae(vcpu)) {
-			if (cr3 & CR3_PAE_RESERVED_BITS) {
-				printk(KERN_DEBUG
-				       "set_cr3: #GP, reserved bits\n");
-				inject_gp(vcpu);
-				return;
-			}
-			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-				       "reserved bits\n");
-				inject_gp(vcpu);
-				return;
-			}
-		}
-		/*
-		 * We don't check reserved bits in nonpae mode, because
-		 * this isn't enforced, and VMware depends on this.
-		 */
-	}
-
-	mutex_lock(&vcpu->kvm->lock);
-	/*
-	 * Does the new cr3 value map to physical memory? (Note, we
-	 * catch an invalid cr3 even in real-mode, because it would
-	 * cause trouble later on when we turn on paging anyway.)
-	 *
-	 * A real CPU would silently accept an invalid cr3 and would
-	 * attempt to use it - with largely undefined (and often hard
-	 * to debug) behavior on the guest side.
-	 */
-	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
-		inject_gp(vcpu);
-	else {
-		vcpu->cr3 = cr3;
-		vcpu->mmu.new_cr3(vcpu);
-	}
-	mutex_unlock(&vcpu->kvm->lock);
-}
-EXPORT_SYMBOL_GPL(set_cr3);
-
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
-{
-	if (cr8 & CR8_RESERVED_BITS) {
-		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
-		inject_gp(vcpu);
-		return;
-	}
-	if (irqchip_in_kernel(vcpu->kvm))
-		kvm_lapic_set_tpr(vcpu, cr8);
-	else
-		vcpu->cr8 = cr8;
-}
-EXPORT_SYMBOL_GPL(set_cr8);
-
-unsigned long get_cr8(struct kvm_vcpu *vcpu)
-{
-	if (irqchip_in_kernel(vcpu->kvm))
-		return kvm_lapic_get_cr8(vcpu);
-	else
-		return vcpu->cr8;
-}
-EXPORT_SYMBOL_GPL(get_cr8);
-
 void fx_init(struct kvm_vcpu *vcpu)
 {
 	unsigned after_mxcsr_mask;