@@ -297,9 +297,9 @@ static void kvm_register_steal_time(void)
 
 	memset(st, 0, sizeof(*st));
 
-	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
+	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
 	printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
-		cpu, __pa(st));
+		cpu, slow_virt_to_phys(st));
 }
 
 static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
@@ -324,7 +324,7 @@ void __cpuinit kvm_guest_cpu_init(void)
 		return;
 
 	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
-		u64 pa = __pa(&__get_cpu_var(apf_reason));
+		u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));
 
 #ifdef CONFIG_PREEMPT
 		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
@@ -340,7 +340,8 @@ void __cpuinit kvm_guest_cpu_init(void)
 		/* Size alignment is implied but just to make it explicit. */
 		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
 		__get_cpu_var(kvm_apic_eoi) = 0;
-		pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED;
+		pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
+			| KVM_MSR_ENABLED;
 		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
 	}
 