@@ -101,17 +101,6 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return (ret != orig_pte);
 }
 
-static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
-{
-	unsigned access;
-
-	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-#if PTTYPE == 64
-	access &= ~(gpte >> PT64_NX_SHIFT);
-#endif
-	return access;
-}
-
 static bool FNAME(is_last_gpte)(struct guest_walker *walker,
 				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 				pt_element_t gpte)
@@ -217,7 +206,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 
 		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
 		if (last_gpte) {
-			pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
+			pte_access = pt_access & gpte_access(vcpu, pte);
 			/* check if the kernel is fetching from user page */
 			if (unlikely(pte_access & PT_USER_MASK) &&
 			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
@@ -268,7 +257,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 			break;
 		}
 
-		pt_access &= FNAME(gpte_access)(vcpu, pte);
+		pt_access &= gpte_access(vcpu, pte);
 		--walker->level;
 	}
 
@@ -364,7 +353,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		return;
 
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
-	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+	pte_access = sp->role.access & gpte_access(vcpu, gpte);
 	protect_clean_gpte(&pte_access, gpte);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
 	if (mmu_invalid_pfn(pfn))
@@ -438,7 +427,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 			continue;
 
-		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+		pte_access = sp->role.access & gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
@@ -791,7 +780,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		gfn = gpte_to_gfn(gpte);
 		pte_access = sp->role.access;
-		pte_access &= FNAME(gpte_access)(vcpu, gpte);
+		pte_access &= gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 
 		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
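
The callers above now use a plain gpte_access() with no FNAME() wrapper, i.e. one helper shared by every paging_tmpl.h instantiation instead of a per-PTTYPE copy. The diff does not show where that helper ends up, but its body can be reconstructed from the FNAME(gpte_access) deleted in the first hunk. A minimal sketch, assuming it becomes a static inline in a common header (the location, the `static inline` linkage, and the u64 parameter type are assumptions; the logic is the removed code):

/*
 * Sketch of the shared helper the callers above now use, reconstructed
 * from the FNAME(gpte_access) body deleted in the first hunk. Its real
 * location and exact signature are not shown by this diff.
 */
static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned access;

	/* R/W and user/supervisor bits sit at the same positions in all
	 * guest page-table formats; execute is allowed by default. */
	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	/*
	 * Clear ACC_EXEC_MASK when the NX bit (bit 63) is set. The old
	 * "#if PTTYPE == 64" guard becomes unnecessary once gpte is a
	 * u64: a 32-bit gpte is zero-extended, so bit 63 is never set
	 * and the shift-and-mask is a no-op for the 32-bit walker.
	 */
	access &= ~(gpte >> PT64_NX_SHIFT);

	return access;
}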