@@ -1093,7 +1093,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
* For each generation, we track the original measured
* nanosecond time, offset, and write, so if TSCs are in
* sync, we can match exact offset, and if not, we can match
- * exact software computaion in compute_guest_tsc()
+ * exact software computation in compute_guest_tsc()
*
* These values are tracked in kvm->arch.cur_xxx variables.
*/
@@ -1500,7 +1500,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;

- /* Bits 2:5 are resrved, Should be zero */
+ /* Bits 2:5 are reserved, Should be zero */
if (data & 0x3c)
return 1;

@@ -1723,7 +1723,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
* Ignore all writes to this no longer documented MSR.
* Writes are only relevant for old K7 processors,
* all pre-dating SVM, but a recommended workaround from
- * AMD for these chips. It is possible to speicify the
+ * AMD for these chips. It is possible to specify the
* affected processor models on the command line, hence
* the need to ignore the workaround.
*/
@@ -4491,7 +4491,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)

/*
* if emulation was due to access to shadowed page table
- * and it failed try to unshadow page and re-entetr the
+ * and it failed try to unshadow page and re-enter the
* guest to let CPU execute the instruction.
*/
if (kvm_mmu_unprotect_page_virt(vcpu, gva))
@@ -5587,7 +5587,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
/*
* We are here if userspace calls get_regs() in the middle of
* instruction emulation. Registers state needs to be copied
- * back from emulation context to vcpu. Usrapace shouldn't do
+ * back from emulation context to vcpu. Userspace shouldn't do
* that usually, but some bad designed PV devices (vmware
* backdoor interface) need this to work
*/
@@ -6116,7 +6116,7 @@ int kvm_arch_hardware_enable(void *garbage)
* as we reset last_host_tsc on all VCPUs to stop this from being
* called multiple times (one for each physical CPU bringup).
*
- * Platforms with unnreliable TSCs don't have to deal with this, they
+ * Platforms with unreliable TSCs don't have to deal with this, they
* will be compensated by the logic in vcpu_load, which sets the TSC to
* catchup mode. This will catchup all VCPUs to real time, but cannot
* guarantee that they stay in perfect synchronization.
@@ -6391,7 +6391,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
map_flags = MAP_SHARED | MAP_ANONYMOUS;

/*To keep backward compatibility with older userspace,
- *x86 needs to hanlde !user_alloc case.
+ *x86 needs to handle !user_alloc case.
*/
if (!user_alloc) {
if (npages && !old.rmap) {