@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/hrtimer.h>
 #include <linux/uaccess.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 
 #include <asm/pgtable.h>
@@ -180,7 +181,6 @@ int kvm_dev_ioctl_check_extension(long ext)
 
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
-	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_MP_STATE:
 
 		r = 1;
@@ -189,7 +189,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
 	case KVM_CAP_IOMMU:
-		r = intel_iommu_found();
+		r = iommu_found();
 		break;
 	default:
 		r = 0;
@@ -439,7 +439,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		expires = div64_u64(itc_diff, cyc_per_usec);
 		kt = ktime_set(0, 1000 * expires);
 
-		down_read(&vcpu->kvm->slots_lock);
 		vcpu->arch.ht_active = 1;
 		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
 
@@ -452,7 +451,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
 			vcpu->arch.mp_state =
 				KVM_MP_STATE_RUNNABLE;
-		up_read(&vcpu->kvm->slots_lock);
 
 		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
 			return -EINTR;
@@ -476,6 +474,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
+static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
+		struct kvm_run *kvm_run)
+{
+	printk("VMM: %s", vcpu->arch.log_buf);
+	return 1;
+}
+
 static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 		struct kvm_run *kvm_run) = {
 	[EXIT_REASON_VM_PANIC]		= handle_vm_error,
@@ -487,6 +492,7 @@ static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_EXTERNAL_INTERRUPT]	= handle_external_interrupt,
 	[EXIT_REASON_IPI]		= handle_ipi,
 	[EXIT_REASON_PTC_G]		= handle_global_purge,
+	[EXIT_REASON_DEBUG]		= handle_vcpu_debug,
 
 };
 
@@ -698,27 +704,24 @@ out:
 	return r;
 }
 
-/*
- * Allocate 16M memory for every vm to hold its specific data.
- * Its memory map is defined in kvm_host.h.
- */
 static struct kvm *kvm_alloc_kvm(void)
 {
 
 	struct kvm *kvm;
 	uint64_t vm_base;
 
+	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);
+
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
 		return ERR_PTR(-ENOMEM);
-	printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
 
-	/* Zero all pages before use! */
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
-
-	kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
+	kvm = (struct kvm *)(vm_base +
+			offsetof(struct kvm_vm_data, kvm_vm_struct));
 	kvm->arch.vm_base = vm_base;
+	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
 
 	return kvm;
 }
@@ -760,21 +763,12 @@ static void kvm_build_io_pmt(struct kvm *kvm)
 
 static void kvm_init_vm(struct kvm *kvm)
 {
-	long vm_base;
-
 	BUG_ON(!kvm);
 
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
 
-	vm_base = kvm->arch.vm_base;
-	if (vm_base) {
-		kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
-		kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
-		kvm->arch.vpd_base = vm_base + KVM_VPD_OFS;
-	}
-
 	/*
 	 *Fill P2M entries for MMIO/IO ranges
 	 */
@@ -838,9 +832,8 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	int i;
 	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
-	int r;
+	int i;
 
 	vcpu_load(vcpu);
 
@@ -857,18 +850,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 	vpd->vpr = regs->vpd.vpr;
 
-	r = -EFAULT;
-	r = copy_from_user(&vcpu->arch.guest, regs->saved_guest,
-			sizeof(union context));
-	if (r)
-		goto out;
-	r = copy_from_user(vcpu + 1, regs->saved_stack +
-			sizeof(struct kvm_vcpu),
-			IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
-	if (r)
-		goto out;
-	vcpu->arch.exit_data =
-		((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
+	memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));
 
 	RESTORE_REGS(mp_state);
 	RESTORE_REGS(vmm_rr);
@@ -902,9 +884,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	set_bit(KVM_REQ_RESUME, &vcpu->requests);
 
 	vcpu_put(vcpu);
-	r = 0;
-out:
-	return r;
+
+	return 0;
 }
 
 long kvm_arch_vm_ioctl(struct file *filp,
@@ -1166,10 +1147,11 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	/*Set entry address for first run.*/
 	regs->cr_iip = PALE_RESET_ENTRY;
 
-	/*Initilize itc offset for vcpus*/
+	/*Initialize itc offset for vcpus*/
 	itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
-	for (i = 0; i < MAX_VCPU_NUM; i++) {
-		v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+	for (i = 0; i < KVM_MAX_VCPUS; i++) {
+		v = (struct kvm_vcpu *)((char *)vcpu +
+				sizeof(struct kvm_vcpu_data) * i);
 		v->arch.itc_offset = itc_offset;
 		v->arch.last_itc = 0;
 	}
@@ -1183,7 +1165,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.apic->vcpu = vcpu;
 
 	p_ctx->gr[1] = 0;
-	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
+	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
 	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
 	p_ctx->psr = 0x1008522000UL;
 	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
@@ -1218,12 +1200,12 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.hlt_timer.function = hlt_timer_fn;
 
 	vcpu->arch.last_run_cpu = -1;
-	vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
+	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
 	vcpu->arch.vsa_base = kvm_vsa_base;
 	vcpu->arch.__gp = kvm_vmm_gp;
 	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
-	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
-	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
+	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
+	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
 	init_ptce_info(vcpu);
 
 	r = 0;
@@ -1273,12 +1255,22 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	int r;
 	int cpu;
 
+	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);
+
+	r = -EINVAL;
+	if (id >= KVM_MAX_VCPUS) {
+		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
+				KVM_MAX_VCPUS);
+		goto fail;
+	}
+
 	r = -ENOMEM;
 	if (!vm_base) {
 		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
 		goto fail;
 	}
-	vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
+	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
+					vcpu_data[id].vcpu_struct));
 	vcpu->kvm = kvm;
 
 	cpu = get_cpu();
@@ -1374,9 +1366,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
-	int i;
-	int r;
 	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+	int i;
+
 	vcpu_load(vcpu);
 
 	for (i = 0; i < 16; i++) {
@@ -1391,14 +1383,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->vpd.vpsr = vpd->vpsr;
 	regs->vpd.vpr = vpd->vpr;
 
-	r = -EFAULT;
-	r = copy_to_user(regs->saved_guest, &vcpu->arch.guest,
-			sizeof(union context));
-	if (r)
-		goto out;
-	r = copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET);
-	if (r)
-		goto out;
+	memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));
+
 	SAVE_REGS(mp_state);
 	SAVE_REGS(vmm_rr);
 	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
@@ -1426,10 +1412,9 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	SAVE_REGS(metaphysical_saved_rr4);
 	SAVE_REGS(fp_psr);
 	SAVE_REGS(saved_gp);
+
 	vcpu_put(vcpu);
-	r = 0;
-out:
-	return r;
+	return 0;
 }
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
@@ -1457,6 +1442,9 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
+	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
+		return -ENOMEM;
+
 	for (i = 0; i < npages; i++) {
 		pfn = gfn_to_pfn(kvm, base_gfn + i);
 		if (!kvm_is_mmio_pfn(pfn)) {
@@ -1631,8 +1619,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int r, i;
 	long n, base;
-	unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
-			+ KVM_MEM_DIRTY_LOG_OFS);
+	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
+			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
 	r = -EINVAL;
 	if (log->slot >= KVM_MEMORY_SLOTS)