@@ -53,6 +53,8 @@
 #include <linux/highmem.h>
 #include <linux/hugetlb.h>
 
+#include "book3s.h"
+
 /* #define EXIT_DEBUG */
 /* #define EXIT_DEBUG_SIMPLE */
 /* #define EXIT_DEBUG_INT */
@@ -66,7 +68,7 @@
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
-void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 {
 	int me;
 	int cpu = vcpu->cpu;
@@ -125,7 +127,7 @@ void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
  * purely defensive; they should never fail.)
  */
 
-void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
@@ -143,7 +145,7 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	spin_unlock(&vcpu->arch.tbacct_lock);
 }
 
-void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
@@ -155,13 +157,13 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->arch.tbacct_lock);
 }
 
-void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
+static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
 {
 	vcpu->arch.shregs.msr = msr;
 	kvmppc_end_cede(vcpu);
 }
 
-void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
+void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 {
 	vcpu->arch.pvr = pvr;
 }
@@ -614,8 +616,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
-static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			      struct task_struct *tsk)
+static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				 struct task_struct *tsk)
 {
 	int r = RESUME_HOST;
 
@@ -717,8 +719,8 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	return r;
 }
 
-int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	int i;
 
@@ -732,12 +734,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
-				  struct kvm_sregs *sregs)
+static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
+					    struct kvm_sregs *sregs)
 {
 	int i, j;
 
-	kvmppc_set_pvr(vcpu, sregs->pvr);
+	kvmppc_set_pvr_hv(vcpu, sregs->pvr);
 
 	j = 0;
 	for (i = 0; i < vcpu->arch.slb_nr; i++) {
@@ -767,7 +769,8 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
 	spin_unlock(&vc->lock);
 }
 
-int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 	long int i;
@@ -866,7 +869,8 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 	return r;
 }
 
-int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
+static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+				 union kvmppc_one_reg *val)
 {
 	int r = 0;
 	long int i;
@@ -979,14 +983,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 	return r;
 }
 
-int kvmppc_core_check_processor_compat(void)
-{
-	if (cpu_has_feature(CPU_FTR_HVMODE))
-		return 0;
-	return -EIO;
-}
-
-struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
+						   unsigned int id)
 {
 	struct kvm_vcpu *vcpu;
 	int err = -EINVAL;
@@ -1010,8 +1008,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.mmcr[0] = MMCR0_FC;
 	vcpu->arch.ctrl = CTRL_RUNLATCH;
 	/* default to host PVR, since we can't spoof it */
-	vcpu->arch.pvr = mfspr(SPRN_PVR);
-	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
@@ -1064,7 +1061,7 @@ static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
 			vpa->dirty);
 }
 
-void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
 {
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
@@ -1075,6 +1072,12 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
+static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
+{
+	/* Indicate we want to get back into the guest */
+	return 1;
+}
+
 static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 {
 	unsigned long dec_nsec, now;
@@ -1356,8 +1359,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 		ret = RESUME_GUEST;
 		if (vcpu->arch.trap)
-			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
-						 vcpu->arch.run_task);
+			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+						    vcpu->arch.run_task);
 
 		vcpu->arch.ret = ret;
 		vcpu->arch.trap = 0;
@@ -1516,7 +1519,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	return vcpu->arch.ret;
 }
 
-int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 	int r;
 	int srcu_idx;
@@ -1638,7 +1641,8 @@ static const struct file_operations kvm_rma_fops = {
 	.release = kvm_rma_release,
 };
 
-long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
+static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
+				      struct kvm_allocate_rma *ret)
 {
 	long fd;
 	struct kvm_rma_info *ri;
@@ -1684,7 +1688,8 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
 	(*sps)++;
 }
 
-int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
+					 struct kvm_ppc_smmu_info *info)
 {
 	struct kvm_ppc_one_seg_page_size *sps;
 
@@ -1705,7 +1710,8 @@ int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
+					 struct kvm_dirty_log *log)
 {
 	struct kvm_memory_slot *memslot;
 	int r;
@@ -1759,8 +1765,8 @@ static void unpin_slot(struct kvm_memory_slot *memslot)
 	}
 }
 
-void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
-			      struct kvm_memory_slot *dont)
+static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
+					struct kvm_memory_slot *dont)
 {
 	if (!dont || free->arch.rmap != dont->arch.rmap) {
 		vfree(free->arch.rmap);
@@ -1773,8 +1779,8 @@ void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
 	}
 }
 
-int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
-			       unsigned long npages)
+static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
+					 unsigned long npages)
 {
 	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
 	if (!slot->arch.rmap)
@@ -1784,9 +1790,9 @@ int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
 	return 0;
 }
 
-int kvmppc_core_prepare_memory_region(struct kvm *kvm,
-				      struct kvm_memory_slot *memslot,
-				      struct kvm_userspace_memory_region *mem)
+static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
+					struct kvm_memory_slot *memslot,
+					struct kvm_userspace_memory_region *mem)
 {
 	unsigned long *phys;
 
@@ -1802,9 +1808,9 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
 	return 0;
 }
 
-void kvmppc_core_commit_memory_region(struct kvm *kvm,
-				      struct kvm_userspace_memory_region *mem,
-				      const struct kvm_memory_slot *old)
+static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
+				struct kvm_userspace_memory_region *mem,
+				const struct kvm_memory_slot *old)
 {
 	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot;
@@ -1847,6 +1853,11 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
 	}
 }
 
+static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
+{
+	return;
+}
+
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
@@ -1994,7 +2005,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	goto out_srcu;
 }
 
-int kvmppc_core_init_vm(struct kvm *kvm)
+static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 {
 	unsigned long lpcr, lpid;
 
@@ -2012,9 +2023,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 	 */
 	cpumask_setall(&kvm->arch.need_tlb_flush);
 
-	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
-	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
-
 	kvm->arch.rma = NULL;
 
 	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
@@ -2059,7 +2067,7 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 	kvm->arch.online_vcores = 0;
}
 
-void kvmppc_core_destroy_vm(struct kvm *kvm)
+static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 {
 	uninhibit_secondary_onlining();
 
@@ -2069,39 +2077,127 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
 		kvm->arch.rma = NULL;
 	}
 
-	kvmppc_rtas_tokens_free(kvm);
-
 	kvmppc_free_hpt(kvm);
-	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
 }
 
-/* These are stubs for now */
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
+/* We don't need to emulate any privileged instructions or dcbz */
+static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+				     unsigned int inst, int *advance)
 {
+	return EMULATE_FAIL;
 }
 
-/* We don't need to emulate any privileged instructions or dcbz */
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-			   unsigned int inst, int *advance)
+static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
+					ulong spr_val)
 {
 	return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
+static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
+					ulong *spr_val)
 {
 	return EMULATE_FAIL;
 }
 
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
+static int kvmppc_core_check_processor_compat_hv(void)
 {
-	return EMULATE_FAIL;
+	if (!cpu_has_feature(CPU_FTR_HVMODE))
+		return -EIO;
+	return 0;
 }
 
-static int kvmppc_book3s_hv_init(void)
+static long kvm_arch_vm_ioctl_hv(struct file *filp,
+				 unsigned int ioctl, unsigned long arg)
+{
+	struct kvm *kvm __maybe_unused = filp->private_data;
+	void __user *argp = (void __user *)arg;
+	long r;
+
+	switch (ioctl) {
+
+	case KVM_ALLOCATE_RMA: {
+		struct kvm_allocate_rma rma;
+		struct kvm *kvm = filp->private_data;
+
+		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
+		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
+			r = -EFAULT;
+		break;
+	}
+
+	case KVM_PPC_ALLOCATE_HTAB: {
+		u32 htab_order;
+
+		r = -EFAULT;
+		if (get_user(htab_order, (u32 __user *)argp))
+			break;
+		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
+		if (r)
+			break;
+		r = -EFAULT;
+		if (put_user(htab_order, (u32 __user *)argp))
+			break;
+		r = 0;
+		break;
+	}
+
+	case KVM_PPC_GET_HTAB_FD: {
+		struct kvm_get_htab_fd ghf;
+
+		r = -EFAULT;
+		if (copy_from_user(&ghf, argp, sizeof(ghf)))
+			break;
+		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
+		break;
+	}
+
+	default:
+		r = -ENOTTY;
+	}
+
+	return r;
+}
+
+static struct kvmppc_ops kvmppc_hv_ops = {
+	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
+	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
+	.get_one_reg = kvmppc_get_one_reg_hv,
+	.set_one_reg = kvmppc_set_one_reg_hv,
+	.vcpu_load = kvmppc_core_vcpu_load_hv,
+	.vcpu_put = kvmppc_core_vcpu_put_hv,
+	.set_msr = kvmppc_set_msr_hv,
+	.vcpu_run = kvmppc_vcpu_run_hv,
+	.vcpu_create = kvmppc_core_vcpu_create_hv,
+	.vcpu_free = kvmppc_core_vcpu_free_hv,
+	.check_requests = kvmppc_core_check_requests_hv,
+	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
+	.flush_memslot = kvmppc_core_flush_memslot_hv,
+	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
+	.commit_memory_region = kvmppc_core_commit_memory_region_hv,
+	.unmap_hva = kvm_unmap_hva_hv,
+	.unmap_hva_range = kvm_unmap_hva_range_hv,
+	.age_hva = kvm_age_hva_hv,
+	.test_age_hva = kvm_test_age_hva_hv,
+	.set_spte_hva = kvm_set_spte_hva_hv,
+	.mmu_destroy = kvmppc_mmu_destroy_hv,
+	.free_memslot = kvmppc_core_free_memslot_hv,
+	.create_memslot = kvmppc_core_create_memslot_hv,
+	.init_vm = kvmppc_core_init_vm_hv,
+	.destroy_vm = kvmppc_core_destroy_vm_hv,
+	.check_processor_compat = kvmppc_core_check_processor_compat_hv,
+	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
+	.emulate_op = kvmppc_core_emulate_op_hv,
+	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
+	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
+	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
+	.arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
+};
+
+static int kvmppc_book3s_init_hv(void)
 {
 	int r;
 
-	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	r = kvm_init(&kvmppc_hv_ops, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 
 	if (r)
 		return r;
@@ -2111,10 +2207,10 @@ static int kvmppc_book3s_hv_init(void)
 	return r;
 }
 
-static void kvmppc_book3s_hv_exit(void)
+static void kvmppc_book3s_exit_hv(void)
 {
 	kvm_exit();
 }
 
-module_init(kvmppc_book3s_hv_init);
-module_exit(kvmppc_book3s_hv_exit);
+module_init(kvmppc_book3s_init_hv);
+module_exit(kvmppc_book3s_exit_hv);
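
The core of this patch is the kvmppc_hv_ops table: entry points that were
global symbols become static and are reached only through the function
pointers handed to kvm_init(), the indirection that paves the way for the
HV and PR flavours of book3s KVM to be built and selected independently.
What follows is a minimal, self-contained C sketch of that dispatch
pattern, not part of the patch; every name in it (flavour_ops,
hv_vcpu_run, and so on) is a hypothetical stand-in, not the kernel API.

#include <stdio.h>

/* Stand-in for the per-flavour callback table that kvmppc_ops models:
 * generic code holds only the table pointer and never names the
 * flavour-specific functions directly. */
struct vcpu;

struct flavour_ops {
	int  (*vcpu_run)(struct vcpu *vcpu);
	void (*vcpu_free)(struct vcpu *vcpu);
};

struct vcpu {
	const struct flavour_ops *ops;	/* chosen once, at init time */
	int id;
};

/* Flavour implementations stay static, just as the patch makes
 * kvmppc_vcpu_run_hv() and friends static in book3s_hv.c. */
static int hv_vcpu_run(struct vcpu *vcpu)
{
	printf("running vcpu %d through the HV callbacks\n", vcpu->id);
	return 0;
}

static void hv_vcpu_free(struct vcpu *vcpu)
{
	printf("freeing vcpu %d\n", vcpu->id);
}

static const struct flavour_ops hv_ops = {
	.vcpu_run  = hv_vcpu_run,
	.vcpu_free = hv_vcpu_free,
};

/* Generic code dispatches through the table, never through the
 * flavour symbols themselves. */
static int generic_vcpu_run(struct vcpu *vcpu)
{
	return vcpu->ops->vcpu_run(vcpu);
}

int main(void)
{
	struct vcpu v = { .ops = &hv_ops, .id = 0 };
	int r = generic_vcpu_run(&v);

	v.ops->vcpu_free(&v);
	return r;
}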