
Commit cb9aaa3

Xiao Guangrong authored and avikivity committed
KVM: do not release the error pfn
After commit a276632, an error pfn is represented by an error code rather than a real page, so it no longer needs to be released.

[The patch has been compile-tested for powerpc.]

Signed-off-by: Xiao Guangrong <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
1 parent 6cede2e commit cb9aaa3
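For context, "the error pfn is replaced by the error code" refers to the earlier change in which a failed gfn-to-pfn translation started returning a sentinel value encoded in a reserved range of the pfn space instead of a pointer to a dummy page; such a value has no struct page (and no reference count) behind it, so there is nothing to release. A rough sketch of the idea follows; the real definitions live in include/linux/kvm_host.h and the exact constants there may differ.

/* Illustrative sketch only -- the actual encoding is defined in
 * include/linux/kvm_host.h and may not match these values. */
#define KVM_PFN_ERR_MASK        (0xfffULL << 52)
#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)

static inline bool is_error_pfn(pfn_t pfn)
{
    /* An error pfn is recognised by value; it never maps to a page. */
    return !!(pfn & KVM_PFN_ERR_MASK);
}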

6 files changed (+14, -21 lines)


arch/powerpc/kvm/e500_tlb.c (-1)

@@ -524,7 +524,6 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
     if (is_error_pfn(pfn)) {
         printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                (long)gfn);
-        kvm_release_pfn_clean(pfn);
         return;
     }

arch/x86/kvm/mmu.c (+3, -4)

@@ -2496,7 +2496,9 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                 rmap_recycle(vcpu, sptep, gfn);
         }
     }
-    kvm_release_pfn_clean(pfn);
+
+    if (!is_error_pfn(pfn))
+        kvm_release_pfn_clean(pfn);
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2648,7 +2650,6 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 
 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
-    kvm_release_pfn_clean(pfn);
     if (pfn == KVM_PFN_ERR_HWPOISON) {
         kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
         return 0;
@@ -3273,8 +3274,6 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
     if (!async)
         return false; /* *pfn has correct page already */
 
-    kvm_release_pfn_clean(*pfn);
-
     if (!prefault && can_do_async_pf(vcpu)) {
         trace_kvm_try_async_get_page(gva, gfn);
         if (kvm_find_async_pf_gfn(vcpu, gfn)) {

arch/x86/kvm/mmu_audit.c (+1, -3)

@@ -116,10 +116,8 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
     gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
     pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
 
-    if (is_error_pfn(pfn)) {
-        kvm_release_pfn_clean(pfn);
+    if (is_error_pfn(pfn))
         return;
-    }
 
     hpa = pfn << PAGE_SHIFT;
     if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)

arch/x86/kvm/paging_tmpl.h (+2, -6)

@@ -370,10 +370,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
     pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
     pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
     pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-    if (mmu_invalid_pfn(pfn)) {
-        kvm_release_pfn_clean(pfn);
+    if (mmu_invalid_pfn(pfn))
         return;
-    }
 
     /*
      * we call mmu_set_spte() with host_writable = true because that
@@ -448,10 +446,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
         gfn = gpte_to_gfn(gpte);
         pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                                       pte_access & ACC_WRITE_MASK);
-        if (mmu_invalid_pfn(pfn)) {
-            kvm_release_pfn_clean(pfn);
+        if (mmu_invalid_pfn(pfn))
             break;
-        }
 
         mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                      NULL, PT_PAGE_TABLE_LEVEL, gfn,

virt/kvm/iommu.c (-1)

@@ -107,7 +107,6 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
          */
         pfn = kvm_pin_pages(slot, gfn, page_size);
         if (is_error_pfn(pfn)) {
-            kvm_release_pfn_clean(pfn);
             gfn += 1;
             continue;
         }

virt/kvm/kvm_main.c (+8, -6)

@@ -102,9 +102,6 @@ static bool largepages_enabled = true;
 
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
-    if (is_error_pfn(pfn))
-        return false;
-
     if (pfn_valid(pfn)) {
         int reserved;
         struct page *tail = pfn_to_page(pfn);
@@ -1165,10 +1162,13 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 
 static struct page *kvm_pfn_to_page(pfn_t pfn)
 {
-    WARN_ON(kvm_is_mmio_pfn(pfn));
+    if (is_error_pfn(pfn))
+        return KVM_ERR_PTR_BAD_PAGE;
 
-    if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
+    if (kvm_is_mmio_pfn(pfn)) {
+        WARN_ON(1);
         return KVM_ERR_PTR_BAD_PAGE;
+    }
 
     return pfn_to_page(pfn);
 }
@@ -1193,7 +1193,9 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-    if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+    WARN_ON(is_error_pfn(pfn));
+
+    if (!kvm_is_mmio_pfn(pfn))
         put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
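Taken together, the hunks above change the calling convention around the gfn-to-pfn helpers. A condensed before/after sketch of the caller pattern, using gfn_to_pfn_atomic() as a stand-in for any of the converted call sites:

/* Before this commit: callers dropped a reference even on failure. */
pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
if (is_error_pfn(pfn)) {
    kvm_release_pfn_clean(pfn);
    return;
}

/* After this commit: an error pfn carries no page reference, so the
 * caller simply bails out, and kvm_release_pfn_clean() now warns if
 * it is ever handed an error pfn. */
pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
if (is_error_pfn(pfn))
    return;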
