@@ -1653,6 +1653,18 @@ static void init_shadow_page_table(struct kvm_mmu_page *sp)
 		sp->spt[i] = 0ull;
 }
 
+static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
+{
+	sp->write_flooding_count = 0;
+}
+
+static void clear_sp_write_flooding_count(u64 *spte)
+{
+	struct kvm_mmu_page *sp = page_header(__pa(spte));
+
+	__clear_sp_write_flooding_count(sp);
+}
+
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gfn_t gfn,
 					     gva_t gaddr,
@@ -1696,6 +1708,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		} else if (sp->unsync)
 			kvm_mmu_mark_parents_unsync(sp);
 
+		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
 		return sp;
 	}
@@ -1848,15 +1861,6 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 	mmu_page_remove_parent_pte(sp, parent_pte);
 }
 
-static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
-{
-	int i;
-	struct kvm_vcpu *vcpu;
-
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.last_pte_updated = NULL;
-}
-
 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *parent_pte;
@@ -1916,7 +1920,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	}
 
 	sp->role.invalid = 1;
-	kvm_mmu_reset_last_pte_updated(kvm);
 	return ret;
 }
 
@@ -2361,8 +2364,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 	kvm_release_pfn_clean(pfn);
-	if (speculative)
-		vcpu->arch.last_pte_updated = sptep;
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -3523,13 +3524,6 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
 		kvm_mmu_flush_tlb(vcpu);
 }
 
-static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
-{
-	u64 *spte = vcpu->arch.last_pte_updated;
-
-	return !!(spte && (*spte & shadow_accessed_mask));
-}
-
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
 				    const u8 *new, int *bytes)
 {
@@ -3570,22 +3564,16 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
  * If we're seeing too many writes to a page, it may no longer be a page table,
  * or we may be forking, in which case it is better to unmap the page.
  */
-static bool detect_write_flooding(struct kvm_vcpu *vcpu, gfn_t gfn)
+static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
 {
-	bool flooded = false;
-
-	if (gfn == vcpu->arch.last_pt_write_gfn
-	    && !last_updated_pte_accessed(vcpu)) {
-		++vcpu->arch.last_pt_write_count;
-		if (vcpu->arch.last_pt_write_count >= 3)
-			flooded = true;
-	} else {
-		vcpu->arch.last_pt_write_gfn = gfn;
-		vcpu->arch.last_pt_write_count = 1;
-		vcpu->arch.last_pte_updated = NULL;
-	}
+	/*
+	 * Skip write-flooding detected for the sp whose level is 1, because
+	 * it can become unsync, then the guest page is not write-protected.
+	 */
+	if (sp->role.level == 1)
+		return false;
 
-	return flooded;
+	return ++sp->write_flooding_count >= 3;
 }
 
 /*
@@ -3657,7 +3645,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
-	bool remote_flush, local_flush, zap_page, flooded, misaligned;
+	bool remote_flush, local_flush, zap_page;
 
 	/*
 	 * If we don't have indirect shadow pages, it means no page is
@@ -3683,12 +3671,12 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-	flooded = detect_write_flooding(vcpu, gfn);
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
-		misaligned = detect_write_misaligned(sp, gpa, bytes);
+		spte = get_written_sptes(sp, gpa, &npte);
 
-		if (misaligned || flooded) {
+		if (detect_write_misaligned(sp, gpa, bytes) ||
+		      detect_write_flooding(sp, spte)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
 						      &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
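
For illustration only, the stand-alone sketch below (hypothetical names, not part of the patch) shows the behaviour the new per-shadow-page counter gives: write_flooding_count is cleared whenever the shadow page is looked up again through kvm_mmu_get_page() and incremented on every emulated guest write, so a page that takes three writes without an intervening reuse is treated as flooded and zapped, while last-level (level 1) pages are exempt because they can simply become unsync instead of staying write-protected.

/*
 * Minimal user-space sketch of the heuristic the diff introduces.
 * "struct shadow_page" is a hypothetical stand-in for struct kvm_mmu_page
 * carrying only the fields the heuristic needs.
 */
#include <stdbool.h>
#include <stdio.h>

struct shadow_page {
	int level;                  /* 1 == last-level page table        */
	int write_flooding_count;   /* emulated writes since last reuse  */
};

/* Mirrors __clear_sp_write_flooding_count(): reset when the page is
 * fetched again via kvm_mmu_get_page(), i.e. it is still in use. */
static void reset_write_flooding(struct shadow_page *sp)
{
	sp->write_flooding_count = 0;
}

/* Mirrors the new detect_write_flooding(): flag the page once it has
 * absorbed three emulated writes without being reused; level-1 pages
 * are skipped because they can go unsync. */
static bool write_flooded(struct shadow_page *sp)
{
	if (sp->level == 1)
		return false;
	return ++sp->write_flooding_count >= 3;
}

int main(void)
{
	struct shadow_page sp = { .level = 2, .write_flooding_count = 0 };

	printf("%d\n", write_flooded(&sp));	/* 0: first write               */
	printf("%d\n", write_flooded(&sp));	/* 0: second write              */
	printf("%d\n", write_flooded(&sp));	/* 1: third write triggers zap  */
	reset_write_flooding(&sp);		/* page was looked up again     */
	printf("%d\n", write_flooded(&sp));	/* 0: counter starts over       */
	return 0;
}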