Skip to content

Commit d01f8d5

Browse files
Xiao Guangrong authored and avikivity committed
KVM: MMU: do not mark accessed bit on pte write path
KVM: MMU: do not mark accessed bit on pte write path. In the current code, the accessed bit is always set when a page fault occurs, so there is no need to set it on the pte write path. Signed-off-by: Xiao Guangrong <[email protected]> Signed-off-by: Avi Kivity <[email protected]>
1 parent 6f6fbe9 commit d01f8d5

File tree

2 files changed

+1
-22
lines changed

2 files changed

+1
-22
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -356,7 +356,6 @@ struct kvm_vcpu_arch {
356356
gfn_t last_pt_write_gfn;
357357
int last_pt_write_count;
358358
u64 *last_pte_updated;
359-
gfn_t last_pte_gfn;
360359

361360
struct fpu guest_fpu;
362361
u64 xcr0;

arch/x86/kvm/mmu.c

Lines changed: 1 addition & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -2207,11 +2207,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
22072207
if (set_mmio_spte(sptep, gfn, pfn, pte_access))
22082208
return 0;
22092209

2210-
/*
2211-
* We don't set the accessed bit, since we sometimes want to see
2212-
* whether the guest actually used the pte (in order to detect
2213-
* demand paging).
2214-
*/
22152210
spte = PT_PRESENT_MASK;
22162211
if (!speculative)
22172212
spte |= shadow_accessed_mask;
@@ -2362,10 +2357,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
23622357
}
23632358
}
23642359
kvm_release_pfn_clean(pfn);
2365-
if (speculative) {
2360+
if (speculative)
23662361
vcpu->arch.last_pte_updated = sptep;
2367-
vcpu->arch.last_pte_gfn = gfn;
2368-
}
23692362
}
23702363

23712364
static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -3533,18 +3526,6 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
35333526
return !!(spte && (*spte & shadow_accessed_mask));
35343527
}
35353528

3536-
static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
3537-
{
3538-
u64 *spte = vcpu->arch.last_pte_updated;
3539-
3540-
if (spte
3541-
&& vcpu->arch.last_pte_gfn == gfn
3542-
&& shadow_accessed_mask
3543-
&& !(*spte & shadow_accessed_mask)
3544-
&& is_shadow_present_pte(*spte))
3545-
set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
3546-
}
3547-
35483529
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
35493530
const u8 *new, int bytes,
35503531
bool guest_initiated)
@@ -3615,7 +3596,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
36153596
++vcpu->kvm->stat.mmu_pte_write;
36163597
trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
36173598
if (guest_initiated) {
3618-
kvm_mmu_access_page(vcpu, gfn);
36193599
if (gfn == vcpu->arch.last_pt_write_gfn
36203600
&& !last_updated_pte_accessed(vcpu)) {
36213601
++vcpu->arch.last_pt_write_count;

0 commit comments

Comments (0)