Skip to content

Commit ec04b26

Browse files
joergroedel authored and avikivity committed
KVM: Prepare memslot data structures for multiple hugepage sizes
[avi: fix build on non-x86]

Signed-off-by: Joerg Roedel <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
1 parent f340ca0 commit ec04b26

File tree

8 files changed

+73
-42
lines changed

8 files changed

+73
-42
lines changed

arch/ia64/include/asm/kvm_host.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -235,7 +235,8 @@ struct kvm_vm_data {
235235
#define KVM_REQ_PTC_G 32
236236
#define KVM_REQ_RESUME 33
237237

238-
#define KVM_PAGES_PER_HPAGE 1
238+
#define KVM_NR_PAGE_SIZES 1
239+
#define KVM_PAGES_PER_HPAGE(x) 1
239240

240241
struct kvm;
241242
struct kvm_vcpu;

arch/powerpc/include/asm/kvm_host.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,8 @@
3434
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
3535

3636
/* We don't currently support large pages. */
37-
#define KVM_PAGES_PER_HPAGE (1UL << 31)
37+
#define KVM_NR_PAGE_SIZES 1
38+
#define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
3839

3940
struct kvm;
4041
struct kvm_run;

arch/s390/include/asm/kvm_host.h

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,11 @@ struct sca_block {
4040
struct sca_entry cpu[64];
4141
} __attribute__((packed));
4242

43-
#define KVM_PAGES_PER_HPAGE 256
43+
#define KVM_NR_PAGE_SIZES 2
44+
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + ((x) - 1) * 8)
45+
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
46+
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
47+
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
4448

4549
#define CPUSTAT_HOST 0x80000000
4650
#define CPUSTAT_WAIT 0x10000000

arch/x86/include/asm/kvm_host.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -54,12 +54,12 @@
5454
#define INVALID_PAGE (~(hpa_t)0)
5555
#define UNMAPPED_GVA (~(gpa_t)0)
5656

57-
/* shadow tables are PAE even on non-PAE hosts */
58-
#define KVM_HPAGE_SHIFT 21
59-
#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
60-
#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
61-
62-
#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
57+
/* KVM Hugepage definitions for x86 */
58+
#define KVM_NR_PAGE_SIZES 2
59+
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + (((x) - 1) * 9))
60+
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
61+
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
62+
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
6363

6464
#define DE_VECTOR 0
6565
#define DB_VECTOR 1

arch/x86/kvm/mmu.c

Lines changed: 16 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -394,9 +394,9 @@ static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
394394
{
395395
unsigned long idx;
396396

397-
idx = (gfn / KVM_PAGES_PER_HPAGE) -
398-
(slot->base_gfn / KVM_PAGES_PER_HPAGE);
399-
return &slot->lpage_info[idx].write_count;
397+
idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
398+
(slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
399+
return &slot->lpage_info[0][idx].write_count;
400400
}
401401

402402
static void account_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -485,10 +485,10 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
485485
if (!lpage)
486486
return &slot->rmap[gfn - slot->base_gfn];
487487

488-
idx = (gfn / KVM_PAGES_PER_HPAGE) -
489-
(slot->base_gfn / KVM_PAGES_PER_HPAGE);
488+
idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
489+
(slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
490490

491-
return &slot->lpage_info[idx].rmap_pde;
491+
return &slot->lpage_info[0][idx].rmap_pde;
492492
}
493493

494494
/*
@@ -731,11 +731,11 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
731731
end = start + (memslot->npages << PAGE_SHIFT);
732732
if (hva >= start && hva < end) {
733733
gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
734+
int idx = gfn_offset /
735+
KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL);
734736
retval |= handler(kvm, &memslot->rmap[gfn_offset]);
735737
retval |= handler(kvm,
736-
&memslot->lpage_info[
737-
gfn_offset /
738-
KVM_PAGES_PER_HPAGE].rmap_pde);
738+
&memslot->lpage_info[0][idx].rmap_pde);
739739
}
740740
}
741741

@@ -1876,8 +1876,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
18761876
pfn_t pfn;
18771877
unsigned long mmu_seq;
18781878

1879-
if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1880-
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1879+
if (is_largepage_backed(vcpu, gfn &
1880+
~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
1881+
gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
18811882
largepage = 1;
18821883
}
18831884

@@ -2082,8 +2083,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
20822083
if (r)
20832084
return r;
20842085

2085-
if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
2086-
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2086+
if (is_largepage_backed(vcpu, gfn &
2087+
~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
2088+
gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
20872089
largepage = 1;
20882090
}
20892091
mmu_seq = vcpu->kvm->mmu_notifier_seq;
@@ -2485,7 +2487,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
24852487
gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
24862488

24872489
if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
2488-
gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2490+
gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
24892491
vcpu->arch.update_pte.largepage = 1;
24902492
}
24912493
vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;

arch/x86/kvm/paging_tmpl.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -401,7 +401,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
401401

402402
if (walker.level == PT_DIRECTORY_LEVEL) {
403403
gfn_t large_gfn;
404-
large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
404+
large_gfn = walker.gfn &
405+
~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
405406
if (is_largepage_backed(vcpu, large_gfn)) {
406407
walker.gfn = large_gfn;
407408
largepage = 1;

include/linux/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ struct kvm_memory_slot {
103103
struct {
104104
unsigned long rmap_pde;
105105
int write_count;
106-
} *lpage_info;
106+
} *lpage_info[KVM_NR_PAGE_SIZES - 1];
107107
unsigned long userspace_addr;
108108
int user_alloc;
109109
};

virt/kvm/kvm_main.c

Lines changed: 39 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1001,19 +1001,25 @@ static struct kvm *kvm_create_vm(void)
10011001
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
10021002
struct kvm_memory_slot *dont)
10031003
{
1004+
int i;
1005+
10041006
if (!dont || free->rmap != dont->rmap)
10051007
vfree(free->rmap);
10061008

10071009
if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
10081010
vfree(free->dirty_bitmap);
10091011

1010-
if (!dont || free->lpage_info != dont->lpage_info)
1011-
vfree(free->lpage_info);
1012+
1013+
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
1014+
if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
1015+
vfree(free->lpage_info[i]);
1016+
free->lpage_info[i] = NULL;
1017+
}
1018+
}
10121019

10131020
free->npages = 0;
10141021
free->dirty_bitmap = NULL;
10151022
free->rmap = NULL;
1016-
free->lpage_info = NULL;
10171023
}
10181024

10191025
void kvm_free_physmem(struct kvm *kvm)
@@ -1087,7 +1093,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
10871093
int r;
10881094
gfn_t base_gfn;
10891095
unsigned long npages, ugfn;
1090-
unsigned long largepages, i;
1096+
int lpages;
1097+
unsigned long i, j;
10911098
struct kvm_memory_slot *memslot;
10921099
struct kvm_memory_slot old, new;
10931100

@@ -1161,33 +1168,48 @@ int __kvm_set_memory_region(struct kvm *kvm,
11611168
else
11621169
new.userspace_addr = 0;
11631170
}
1164-
if (npages && !new.lpage_info) {
1165-
largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
1166-
largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
1171+
if (!npages)
1172+
goto skip_lpage;
11671173

1168-
new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
1174+
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
1175+
int level = i + 2;
11691176

1170-
if (!new.lpage_info)
1177+
/* Avoid unused variable warning if no large pages */
1178+
(void)level;
1179+
1180+
if (new.lpage_info[i])
1181+
continue;
1182+
1183+
lpages = 1 + (base_gfn + npages - 1) /
1184+
KVM_PAGES_PER_HPAGE(level);
1185+
lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
1186+
1187+
new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
1188+
1189+
if (!new.lpage_info[i])
11711190
goto out_free;
11721191

1173-
memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
1192+
memset(new.lpage_info[i], 0,
1193+
lpages * sizeof(*new.lpage_info[i]));
11741194

1175-
if (base_gfn % KVM_PAGES_PER_HPAGE)
1176-
new.lpage_info[0].write_count = 1;
1177-
if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
1178-
new.lpage_info[largepages-1].write_count = 1;
1195+
if (base_gfn % KVM_PAGES_PER_HPAGE(level))
1196+
new.lpage_info[i][0].write_count = 1;
1197+
if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
1198+
new.lpage_info[i][lpages - 1].write_count = 1;
11791199
ugfn = new.userspace_addr >> PAGE_SHIFT;
11801200
/*
11811201
* If the gfn and userspace address are not aligned wrt each
11821202
* other, or if explicitly asked to, disable large page
11831203
* support for this slot
11841204
*/
1185-
if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE - 1) ||
1205+
if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
11861206
!largepages_enabled)
1187-
for (i = 0; i < largepages; ++i)
1188-
new.lpage_info[i].write_count = 1;
1207+
for (j = 0; j < lpages; ++j)
1208+
new.lpage_info[i][j].write_count = 1;
11891209
}
11901210

1211+
skip_lpage:
1212+
11911213
/* Allocate page dirty bitmap if needed */
11921214
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
11931215
unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

0 commit comments

Comments (0)