@@ -1093,7 +1093,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	 * For each generation, we track the original measured
 	 * nanosecond time, offset, and write, so if TSCs are in
 	 * sync, we can match exact offset, and if not, we can match
-	 * exact software computaion in compute_guest_tsc()
+	 * exact software computation in compute_guest_tsc()
 	 *
 	 * These values are tracked in kvm->arch.cur_xxx variables.
 	 */
@@ -1500,7 +1500,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 {
 	gpa_t gpa = data & ~0x3f;
 
-	/* Bits 2:5 are resrved, Should be zero */
+	/* Bits 2:5 are reserved, Should be zero */
 	if (data & 0x3c)
 		return 1;
 
@@ -1723,7 +1723,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		 * Ignore all writes to this no longer documented MSR.
 		 * Writes are only relevant for old K7 processors,
 		 * all pre-dating SVM, but a recommended workaround from
-		 * AMD for these chips. It is possible to speicify the
+		 * AMD for these chips. It is possible to specify the
 		 * affected processor models on the command line, hence
 		 * the need to ignore the workaround.
 		 */
@@ -4491,7 +4491,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
 
 	/*
 	 * if emulation was due to access to shadowed page table
-	 * and it failed try to unshadow page and re-entetr the
+	 * and it failed try to unshadow page and re-enter the
 	 * guest to let CPU execute the instruction.
 	 */
 	if (kvm_mmu_unprotect_page_virt(vcpu, gva))
@@ -5587,7 +5587,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 		/*
 		 * We are here if userspace calls get_regs() in the middle of
 		 * instruction emulation. Registers state needs to be copied
-		 * back from emulation context to vcpu. Usrapace shouldn't do
+		 * back from emulation context to vcpu. Userspace shouldn't do
 		 * that usually, but some bad designed PV devices (vmware
 		 * backdoor interface) need this to work
 		 */
@@ -6116,7 +6116,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	 * as we reset last_host_tsc on all VCPUs to stop this from being
 	 * called multiple times (one for each physical CPU bringup).
 	 *
-	 * Platforms with unnreliable TSCs don't have to deal with this, they
+	 * Platforms with unreliable TSCs don't have to deal with this, they
 	 * will be compensated by the logic in vcpu_load, which sets the TSC to
 	 * catchup mode. This will catchup all VCPUs to real time, but cannot
 	 * guarantee that they stay in perfect synchronization.
@@ -6391,7 +6391,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		map_flags = MAP_SHARED | MAP_ANONYMOUS;
 
 	/*To keep backward compatibility with older userspace,
-	 *x86 needs to hanlde !user_alloc case.
+	 *x86 needs to handle !user_alloc case.
 	 */
 	if (!user_alloc) {
 		if (npages && !old.rmap) {