
Commit f759e2b

Xiao Guangrong authored and avikivity committed
KVM: MMU: avoid pte_list_desc running out in kvm_mmu_pte_write
kvm_mmu_pte_write is unsafe: it needs to allocate pte_list_desc objects when sptes are prefetched, but there is no way to know in advance how many sptes will be prefetched on this path, so the free pte_list_desc objects in the cache can be exhausted and the BUG_ON() is triggered. In addition, some paths do not fill the cache at all, e.g. an emulated INS instruction that does not go through a page fault.

Signed-off-by: Xiao Guangrong <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
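The fix follows a simple pattern: fill a small, fixed-size object cache on a best-effort basis before taking the lock, then check how many objects are actually left before doing optional work that would consume them. The sketch below only illustrates that pattern in plain user-space C; the structure, size, and function names are simplified stand-ins, not the kernel's kvm_mmu_memory_cache code.

#include <stdlib.h>

#define CACHE_MIN_OBJS 4

struct obj_cache {
        int nobjs;
        void *objects[CACHE_MIN_OBJS];
};

/* Best-effort refill: stops early if an allocation fails. */
static int cache_topup(struct obj_cache *cache)
{
        while (cache->nobjs < CACHE_MIN_OBJS) {
                void *obj = malloc(64);

                if (!obj)
                        return -1;      /* callers here may ignore this */
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}

static int cache_free_objects(struct obj_cache *cache)
{
        return cache->nobjs;
}

/* Only legal when cache_free_objects() says the cache is non-empty. */
static void *cache_alloc(struct obj_cache *cache)
{
        return cache->objects[--cache->nobjs];
}

/* Write path: refill without caring about failure, then prefetch only
 * if descriptors are actually available, instead of hitting a BUG_ON()
 * when the cache unexpectedly runs dry. */
static void pte_write(struct obj_cache *cache)
{
        cache_topup(cache);             /* return value deliberately ignored */

        if (cache_free_objects(cache)) {
                void *desc = cache_alloc(cache);
                /* ... use desc for the prefetched spte's rmap entry ... */
                free(desc);
        }
        /* otherwise skip the optional prefetch */
}

int main(void)
{
        struct obj_cache cache = { 0 };

        pte_write(&cache);
        return 0;
}

The hunks below split the work the same way: mmu_topup_memory_caches() is called with its return value deliberately ignored, and rmap_can_add() gates the optional new-pte update.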
1 parent 51cfe38 commit f759e2b

File tree

1 file changed: +20, -5 lines changed


arch/x86/kvm/mmu.c

Lines changed: 20 additions & 5 deletions
@@ -593,6 +593,11 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
         return 0;
 }
 
+static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
+{
+        return cache->nobjs;
+}
+
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
                                   struct kmem_cache *cache)
 {
@@ -970,6 +975,14 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
         return &linfo->rmap_pde;
 }
 
+static bool rmap_can_add(struct kvm_vcpu *vcpu)
+{
+        struct kvm_mmu_memory_cache *cache;
+
+        cache = &vcpu->arch.mmu_pte_list_desc_cache;
+        return mmu_memory_cache_free_objects(cache);
+}
+
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
         struct kvm_mmu_page *sp;
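rmap_can_add() simply reports whether the per-vCPU mmu_pte_list_desc_cache still holds free objects, so a caller can skip an optional rmap_add() instead of exhausting the cache and hitting the BUG_ON(). A hypothetical caller might look like the sketch below; try_prefetch_spte() is a placeholder name, not code from this diff, and rmap_add()'s signature is taken from the context lines above.

/* Sketch only: guard an optional prefetch on cache availability. */
static void try_prefetch_spte(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
        if (!rmap_can_add(vcpu))
                return;                 /* no pte_list_desc left: skip quietly */

        /* safe: rmap_add() can take a descriptor from the cache */
        rmap_add(vcpu, spte, gfn);
}

The second kvm_mmu_pte_write hunk below applies exactly this kind of guard, adding && rmap_can_add(vcpu) in front of the mmu_pte_write_new_pte() call.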
@@ -3586,6 +3599,12 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                 break;
         }
 
+        /*
+         * No need to care whether allocation memory is successful
+         * or not since pte prefetch is skiped if it does not have
+         * enough objects in the cache.
+         */
+        mmu_topup_memory_caches(vcpu);
         spin_lock(&vcpu->kvm->mmu_lock);
         if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
                 gentry = 0;
@@ -3656,7 +3675,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                         mmu_page_zap_pte(vcpu->kvm, sp, spte);
                         if (gentry &&
                               !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
-                              & mask.word))
+                              & mask.word) && rmap_can_add(vcpu))
                                 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
                         if (!remote_flush && need_remote_flush(entry, *spte))
                                 remote_flush = true;
@@ -3717,10 +3736,6 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
                 goto out;
         }
 
-        r = mmu_topup_memory_caches(vcpu);
-        if (r)
-                goto out;
-
         er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 
         switch (er) {
