Commit d89cc61

Takuya Yoshikawa authored and Avi Kivity (avikivity) committed
KVM: Push rmap into kvm_arch_memory_slot
Two reasons:
 - x86 can integrate rmap and rmap_pde and remove the heuristics in
   __gfn_to_rmap().
 - Some architectures do not need rmap.

Since rmap is one of the most memory-consuming structures in KVM, ppc
should restrict the allocation to Book3S HV.

Signed-off-by: Takuya Yoshikawa <[email protected]>
Acked-by: Paul Mackerras <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
1 parent 65fbe37 commit d89cc61
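
For context on the x86 side of the change: once every level shares one rmap array, the lookup in __gfn_to_rmap() can rely on gfn_to_index() alone, because the per-level gfn shift is zero at PT_PAGE_TABLE_LEVEL, where the index degenerates to gfn - base_gfn. Below is a minimal standalone sketch of that indexing, not kernel code; the 9-bits-per-level shift and the constants mirror x86 KVM but are simplified assumptions here.

/*
 * Standalone sketch (not kernel code) of the indexing that lets
 * __gfn_to_rmap() drop its PT_PAGE_TABLE_LEVEL special case: with
 * 9 gfn bits per paging level (x86-style, an assumption here),
 * gfn_to_index() at level 1 reduces to gfn - base_gfn, so the 4K
 * rmap and the large-page rmaps can share one indexing path.
 */
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL 1
#define KVM_NR_PAGE_SIZES   3            /* 4K, 2M, 1G */
#define HPAGE_GFN_SHIFT(l)  (((l) - 1) * 9)

static unsigned long gfn_to_index(unsigned long gfn, unsigned long base_gfn,
				  int level)
{
	return (gfn >> HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	unsigned long base_gfn = 0x1000, gfn = 0x1234;
	int level;

	/* Print which slot and index of the unified rmap array is used. */
	for (level = PT_PAGE_TABLE_LEVEL;
	     level < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++level)
		printf("level %d -> rmap[%d][%lu]\n", level,
		       level - PT_PAGE_TABLE_LEVEL,
		       gfn_to_index(gfn, base_gfn, level));
	return 0;
}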

9 files changed (+49, -44 lines)

arch/powerpc/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
@@ -221,6 +221,7 @@ struct revmap_entry {
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
+	unsigned long *rmap;
 };
 
 struct kvm_arch {

arch/powerpc/kvm/book3s_64_mmu_hv.c

Lines changed: 3 additions & 3 deletions
@@ -705,7 +705,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		goto out_unlock;
 	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
 
-	rmap = &memslot->rmap[gfn - memslot->base_gfn];
+	rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
 	lock_rmap(rmap);
 
 	/* Check if we might have been invalidated; let the guest retry if so */
@@ -788,7 +788,7 @@ static int kvm_handle_hva_range(struct kvm *kvm,
 		for (; gfn < gfn_end; ++gfn) {
 			gfn_t gfn_offset = gfn - memslot->base_gfn;
 
-			ret = handler(kvm, &memslot->rmap[gfn_offset], gfn);
+			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
 			retval |= ret;
 		}
 	}
@@ -1036,7 +1036,7 @@ long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 	unsigned long *rmapp, *map;
 
 	preempt_disable();
-	rmapp = memslot->rmap;
+	rmapp = memslot->arch.rmap;
 	map = memslot->dirty_bitmap;
 	for (i = 0; i < memslot->npages; ++i) {
 		if (kvm_test_clear_dirty(kvm, rmapp))

arch/powerpc/kvm/book3s_hv_rm_mmu.c

Lines changed: 2 additions & 2 deletions
@@ -84,7 +84,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		return;
 
-	rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
+	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
 	lock_rmap(rmap);
 
 	head = *rmap & KVMPPC_RMAP_INDEX;
@@ -180,7 +180,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 		if (!slot_is_aligned(memslot, psize))
 			return H_PARAMETER;
 		slot_fn = gfn - memslot->base_gfn;
-		rmap = &memslot->rmap[slot_fn];
+		rmap = &memslot->arch.rmap[slot_fn];
 
 		if (!kvm->arch.using_mmu_notifiers) {
 			physp = kvm->arch.slot_phys[memslot->id];

arch/powerpc/kvm/powerpc.c

Lines changed: 8 additions & 0 deletions
@@ -302,10 +302,18 @@ long kvm_arch_dev_ioctl(struct file *filp,
 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 			   struct kvm_memory_slot *dont)
 {
+	if (!dont || free->arch.rmap != dont->arch.rmap) {
+		vfree(free->arch.rmap);
+		free->arch.rmap = NULL;
+	}
 }
 
 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 {
+	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
+	if (!slot->arch.rmap)
+		return -ENOMEM;
+
 	return 0;
 }
 

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
@@ -504,7 +504,7 @@ struct kvm_lpage_info {
 };
 
 struct kvm_arch_memory_slot {
-	unsigned long *rmap_pde[KVM_NR_PAGE_SIZES - 1];
+	unsigned long *rmap[KVM_NR_PAGE_SIZES];
 	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 };
 

arch/x86/kvm/mmu.c

Lines changed: 1 addition & 4 deletions
@@ -970,11 +970,8 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 {
 	unsigned long idx;
 
-	if (likely(level == PT_PAGE_TABLE_LEVEL))
-		return &slot->rmap[gfn - slot->base_gfn];
-
 	idx = gfn_to_index(gfn, slot->base_gfn, level);
-	return &slot->arch.rmap_pde[level - PT_DIRECTORY_LEVEL][idx];
+	return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
 }
 
 /*

arch/x86/kvm/x86.c

Lines changed: 32 additions & 23 deletions
@@ -6303,14 +6303,18 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
 {
 	int i;
 
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		if (!dont || free->arch.rmap_pde[i] != dont->arch.rmap_pde[i]) {
-			kvm_kvfree(free->arch.rmap_pde[i]);
-			free->arch.rmap_pde[i] = NULL;
+	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
+			kvm_kvfree(free->arch.rmap[i]);
+			free->arch.rmap[i] = NULL;
 		}
-		if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
-			kvm_kvfree(free->arch.lpage_info[i]);
-			free->arch.lpage_info[i] = NULL;
+		if (i == 0)
+			continue;
+
+		if (!dont || free->arch.lpage_info[i - 1] !=
+		    dont->arch.lpage_info[i - 1]) {
+			kvm_kvfree(free->arch.lpage_info[i - 1]);
+			free->arch.lpage_info[i - 1] = NULL;
 		}
 	}
 }
@@ -6319,28 +6323,30 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 {
 	int i;
 
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
+	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
 		unsigned long ugfn;
 		int lpages;
-		int level = i + 2;
+		int level = i + 1;
 
 		lpages = gfn_to_index(slot->base_gfn + npages - 1,
 				      slot->base_gfn, level) + 1;
 
-		slot->arch.rmap_pde[i] =
-			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap_pde[i]));
-		if (!slot->arch.rmap_pde[i])
+		slot->arch.rmap[i] =
+			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
+		if (!slot->arch.rmap[i])
 			goto out_free;
+		if (i == 0)
+			continue;
 
-		slot->arch.lpage_info[i] =
-			kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
-		if (!slot->arch.lpage_info[i])
+		slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
+					sizeof(*slot->arch.lpage_info[i - 1]));
+		if (!slot->arch.lpage_info[i - 1])
 			goto out_free;
 
 		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->arch.lpage_info[i][0].write_count = 1;
+			slot->arch.lpage_info[i - 1][0].write_count = 1;
 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
-			slot->arch.lpage_info[i][lpages - 1].write_count = 1;
+			slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
@@ -6352,18 +6358,21 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
 			unsigned long j;
 
 			for (j = 0; j < lpages; ++j)
-				slot->arch.lpage_info[i][j].write_count = 1;
+				slot->arch.lpage_info[i - 1][j].write_count = 1;
 		}
 	}
 
 	return 0;
 
 out_free:
-	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
-		kvm_kvfree(slot->arch.rmap_pde[i]);
-		kvm_kvfree(slot->arch.lpage_info[i]);
-		slot->arch.rmap_pde[i] = NULL;
-		slot->arch.lpage_info[i] = NULL;
+	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+		kvm_kvfree(slot->arch.rmap[i]);
+		slot->arch.rmap[i] = NULL;
+		if (i == 0)
+			continue;
+
+		kvm_kvfree(slot->arch.lpage_info[i - 1]);
+		slot->arch.lpage_info[i - 1] = NULL;
 	}
 	return -ENOMEM;
 }
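
One detail worth spelling out in the rewritten x86 loops above: the unified array is walked by page-size slot i over all of KVM_NR_PAGE_SIZES, where slot i corresponds to level i + 1, while lpage_info keeps its old size and is reached at index i - 1 (the 4K slot has no lpage_info, hence the i == 0 continue). A tiny standalone sketch of that mapping follows; the constants are assumed to mirror x86 KVM and are not the kernel definitions.

/*
 * Sketch of the index mapping used by the rewritten x86 loops:
 * page-size slot i covers level i + 1; the 4K slot (i == 0) has an
 * rmap but no lpage_info, so lpage_info lives at index i - 1.
 */
#include <stdio.h>

#define KVM_NR_PAGE_SIZES   3
#define PT_PAGE_TABLE_LEVEL 1

int main(void)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		int level = i + PT_PAGE_TABLE_LEVEL;

		printf("rmap[%d] -> level %d", i, level);
		if (i == 0)
			printf(", no lpage_info (base pages)\n");
		else
			printf(", lpage_info[%d]\n", i - 1);
	}
	return 0;
}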

include/linux/kvm_host.h

Lines changed: 0 additions & 1 deletion
@@ -213,7 +213,6 @@ struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long flags;
-	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
 	struct kvm_arch_memory_slot arch;
 	unsigned long userspace_addr;

virt/kvm/kvm_main.c

Lines changed: 1 addition & 10 deletions
@@ -550,16 +550,12 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 				  struct kvm_memory_slot *dont)
 {
-	if (!dont || free->rmap != dont->rmap)
-		vfree(free->rmap);
-
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
 		kvm_destroy_dirty_bitmap(free);
 
 	kvm_arch_free_memslot(free, dont);
 
 	free->npages = 0;
-	free->rmap = NULL;
 }
 
 void kvm_free_physmem(struct kvm *kvm)
@@ -768,11 +764,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (npages && !old.npages) {
 		new.user_alloc = user_alloc;
 		new.userspace_addr = mem->userspace_addr;
-#ifndef CONFIG_S390
-		new.rmap = vzalloc(npages * sizeof(*new.rmap));
-		if (!new.rmap)
-			goto out_free;
-#endif /* not defined CONFIG_S390 */
+
 		if (kvm_arch_create_memslot(&new, npages))
 			goto out_free;
 	}
@@ -831,7 +823,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
 	if (!npages) {
-		new.rmap = NULL;
 		new.dirty_bitmap = NULL;
 		memset(&new.arch, 0, sizeof(new.arch));
 	}
