Skip to content

Commit f57f2ef

Browse files
Xiao Guangrong authored
avikivity committed
KVM: MMU: fast prefetch spte on invlpg path
Fast prefetch spte for the unsync shadow page on the invlpg path. Signed-off-by: Xiao Guangrong <[email protected]>. Signed-off-by: Avi Kivity <[email protected]>.
1 parent 505aef8 commit f57f2ef

File tree

4 files changed

+36
-40
lines changed

4 files changed

+36
-40
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -461,7 +461,6 @@ struct kvm_arch {
461461
unsigned int n_requested_mmu_pages;
462462
unsigned int n_max_mmu_pages;
463463
unsigned int indirect_shadow_pages;
464-
atomic_t invlpg_counter;
465464
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
466465
/*
467466
* Hash table of struct kvm_mmu_page.
@@ -757,8 +756,7 @@ int fx_init(struct kvm_vcpu *vcpu);
757756

758757
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
759758
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
760-
const u8 *new, int bytes,
761-
bool guest_initiated);
759+
const u8 *new, int bytes);
762760
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
763761
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
764762
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);

arch/x86/kvm/mmu.c

Lines changed: 15 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -3531,8 +3531,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
35313531
}
35323532

35333533
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
3534-
const u8 *new, int bytes,
3535-
bool guest_initiated)
3534+
const u8 *new, int bytes)
35363535
{
35373536
gfn_t gfn = gpa >> PAGE_SHIFT;
35383537
union kvm_mmu_page_role mask = { .word = 0 };
@@ -3541,7 +3540,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
35413540
LIST_HEAD(invalid_list);
35423541
u64 entry, gentry, *spte;
35433542
unsigned pte_size, page_offset, misaligned, quadrant, offset;
3544-
int level, npte, invlpg_counter, r, flooded = 0;
3543+
int level, npte, r, flooded = 0;
35453544
bool remote_flush, local_flush, zap_page;
35463545

35473546
/*
@@ -3556,19 +3555,16 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
35563555

35573556
pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
35583557

3559-
invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
3560-
35613558
/*
35623559
* Assume that the pte write on a page table of the same type
35633560
* as the current vcpu paging mode since we update the sptes only
35643561
* when they have the same mode.
35653562
*/
3566-
if ((is_pae(vcpu) && bytes == 4) || !new) {
3563+
if (is_pae(vcpu) && bytes == 4) {
35673564
/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
3568-
if (is_pae(vcpu)) {
3569-
gpa &= ~(gpa_t)7;
3570-
bytes = 8;
3571-
}
3565+
gpa &= ~(gpa_t)7;
3566+
bytes = 8;
3567+
35723568
r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
35733569
if (r)
35743570
gentry = 0;
@@ -3594,22 +3590,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
35943590
*/
35953591
mmu_topup_memory_caches(vcpu);
35963592
spin_lock(&vcpu->kvm->mmu_lock);
3597-
if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
3598-
gentry = 0;
35993593
kvm_mmu_free_some_pages(vcpu);
36003594
++vcpu->kvm->stat.mmu_pte_write;
36013595
trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
3602-
if (guest_initiated) {
3603-
if (gfn == vcpu->arch.last_pt_write_gfn
3604-
&& !last_updated_pte_accessed(vcpu)) {
3605-
++vcpu->arch.last_pt_write_count;
3606-
if (vcpu->arch.last_pt_write_count >= 3)
3607-
flooded = 1;
3608-
} else {
3609-
vcpu->arch.last_pt_write_gfn = gfn;
3610-
vcpu->arch.last_pt_write_count = 1;
3611-
vcpu->arch.last_pte_updated = NULL;
3612-
}
3596+
if (gfn == vcpu->arch.last_pt_write_gfn
3597+
&& !last_updated_pte_accessed(vcpu)) {
3598+
++vcpu->arch.last_pt_write_count;
3599+
if (vcpu->arch.last_pt_write_count >= 3)
3600+
flooded = 1;
3601+
} else {
3602+
vcpu->arch.last_pt_write_gfn = gfn;
3603+
vcpu->arch.last_pt_write_count = 1;
3604+
vcpu->arch.last_pte_updated = NULL;
36133605
}
36143606

36153607
mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;

arch/x86/kvm/paging_tmpl.h

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -672,20 +672,27 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
672672
{
673673
struct kvm_shadow_walk_iterator iterator;
674674
struct kvm_mmu_page *sp;
675-
gpa_t pte_gpa = -1;
676675
int level;
677676
u64 *sptep;
678677

679678
vcpu_clear_mmio_info(vcpu, gva);
680679

681-
spin_lock(&vcpu->kvm->mmu_lock);
680+
/*
681+
* No need to check return value here, rmap_can_add() can
682+
* help us to skip pte prefetch later.
683+
*/
684+
mmu_topup_memory_caches(vcpu);
682685

686+
spin_lock(&vcpu->kvm->mmu_lock);
683687
for_each_shadow_entry(vcpu, gva, iterator) {
684688
level = iterator.level;
685689
sptep = iterator.sptep;
686690

687691
sp = page_header(__pa(sptep));
688692
if (is_last_spte(*sptep, level)) {
693+
pt_element_t gpte;
694+
gpa_t pte_gpa;
695+
689696
if (!sp->unsync)
690697
break;
691698

@@ -694,22 +701,21 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
694701

695702
if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
696703
kvm_flush_remote_tlbs(vcpu->kvm);
704+
705+
if (!rmap_can_add(vcpu))
706+
break;
707+
708+
if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
709+
sizeof(pt_element_t)))
710+
break;
711+
712+
FNAME(update_pte)(vcpu, sp, sptep, &gpte);
697713
}
698714

699715
if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
700716
break;
701717
}
702-
703-
atomic_inc(&vcpu->kvm->arch.invlpg_counter);
704-
705718
spin_unlock(&vcpu->kvm->mmu_lock);
706-
707-
if (pte_gpa == -1)
708-
return;
709-
710-
if (mmu_topup_memory_caches(vcpu))
711-
return;
712-
kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
713719
}
714720

715721
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,

arch/x86/kvm/x86.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4087,7 +4087,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
40874087
ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
40884088
if (ret < 0)
40894089
return 0;
4090-
kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
4090+
kvm_mmu_pte_write(vcpu, gpa, val, bytes);
40914091
return 1;
40924092
}
40934093

@@ -4324,7 +4324,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
43244324
if (!exchanged)
43254325
return X86EMUL_CMPXCHG_FAILED;
43264326

4327-
kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
4327+
kvm_mmu_pte_write(vcpu, gpa, new, bytes);
43284328

43294329
return X86EMUL_CONTINUE;
43304330

0 commit comments

Comments
 (0)