Skip to content

Commit 0995c4a

Browse files
committed
Add unit tests for Vm trait
Signed-off-by: Ludvig Liljenberg <[email protected]>
1 parent 6d2f287 commit 0995c4a

File tree

1 file changed

+239
-0
lines changed
  • src/hyperlight_host/src/hypervisor/vm

1 file changed

+239
-0
lines changed

src/hyperlight_host/src/hypervisor/vm/mod.rs

Lines changed: 239 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -181,6 +181,37 @@ pub(crate) trait Vm: Debug + Send {
181181

182182
#[cfg(test)]
183183
mod tests {
184+
use super::*;
185+
use crate::hypervisor::regs::{CommonSegmentRegister, CommonTableRegister};
186+
187+
/// Construct a boxed [`Vm`] backed by whichever hypervisor is available
/// on the current machine (KVM, MSHV, or WHP).
///
/// Panics if no hypervisor is available or VM creation fails — acceptable
/// in test-only code.
fn boxed_vm() -> Box<dyn Vm> {
    match get_available_hypervisor().as_ref().unwrap() {
        #[cfg(kvm)]
        HypervisorType::Kvm => {
            use crate::hypervisor::vm::kvm::KvmVm;
            Box::new(KvmVm::new().unwrap())
        }
        #[cfg(mshv3)]
        HypervisorType::Mshv => {
            use crate::hypervisor::vm::mshv::MshvVm;
            Box::new(MshvVm::new().unwrap())
        }
        #[cfg(target_os = "windows")]
        HypervisorType::Whp => {
            use hyperlight_common::mem::PAGE_SIZE_USIZE;

            use crate::hypervisor::vm::whp::WhpVm;
            use crate::hypervisor::wrappers::HandleWrapper;
            use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};

            // WHP requires a file-backed memory mapping at construction
            // time, so allocate a single page to hand it.
            let mem_size = PAGE_SIZE_USIZE;
            let shared_mem = ExclusiveSharedMemory::new(mem_size).unwrap();
            let file_handle = HandleWrapper::from(shared_mem.get_mmap_file_handle());
            Box::new(WhpVm::new(file_handle, shared_mem.raw_mem_size()).unwrap())
        }
    }
}
184215

185216
#[test]
186217
// TODO: add support for testing on WHP
@@ -200,4 +231,212 @@ mod tests {
200231
}
201232
}
202233
}
234+
235+
#[test]
fn regs() {
    let vm = boxed_vm();

    // Give every register a unique value so any field mix-up in the
    // backend's get/set plumbing is caught by the equality check below.
    let written = CommonRegisters {
        rax: 1,
        rbx: 2,
        rcx: 3,
        rdx: 4,
        rsi: 5,
        rdi: 6,
        rsp: 7,
        rbp: 8,
        r8: 9,
        r9: 10,
        r10: 11,
        r11: 12,
        r12: 13,
        r13: 14,
        r14: 15,
        r15: 16,
        rip: 17,
        rflags: 0x2, // reserved bit 1 must always be set
    };

    vm.set_regs(&written).unwrap();
    assert_eq!(written, vm.regs().unwrap());
}
264+
265+
#[test]
fn fpu() {
    let vm = boxed_vm();

    // x87 FPU registers are 80-bit (10 bytes), stored in 16-byte slots for
    // alignment. Only the first 10 bytes are preserved; the remaining 6
    // bytes are reserved/zeroed.
    // See Intel® 64 and IA-32 Architectures SDM, Vol. 1, Sec. 10.5.1.1 (x87 State)
    let mut fpr_slot = [0u8; 16];
    fpr_slot[..10].fill(1);

    let written = CommonFpu {
        fpr: [fpr_slot; 8],
        fcw: 2,
        fsw: 3,
        ftwx: 4,
        last_opcode: 5,
        last_ip: 6,
        last_dp: 7,
        xmm: [[8; 16]; 16],
        mxcsr: 9,
        pad1: 0,
        pad2: 0,
    };
    vm.set_fpu(&written).unwrap();

    #[cfg_attr(not(kvm), allow(unused_mut))]
    let mut read_back = vm.fpu().unwrap();
    #[cfg(kvm)]
    {
        // KVM's get/set fpu does not preserve mxcsr, so exclude it from
        // the comparison on that backend.
        read_back.mxcsr = written.mxcsr;
    }
    assert_eq!(written, read_back);
}
295+
296+
#[test]
fn sregs() {
    let vm = boxed_vm();

    // Generic descriptor reused for every data-ish segment (ds/es/fs/gs/ss/tr/ldt).
    let data_seg = CommonSegmentRegister {
        base: 1,
        limit: 2,
        selector: 3,
        type_: 3,
        present: 1,
        dpl: 1,
        db: 0,
        s: 1,
        l: 1,
        g: 1,
        avl: 1,
        unusable: 0,
        padding: 0,
    };

    // Long-mode code segment for cs.
    let code_seg = CommonSegmentRegister {
        base: 1,
        limit: 0xFFFF,
        selector: 0x08,
        type_: 0b1011, // code segment, execute/read, accessed
        present: 1,
        dpl: 1,
        db: 0, // must be 0 in 64-bit mode
        s: 1,
        l: 1, // 64-bit mode
        g: 1,
        avl: 1,
        unusable: 0,
        padding: 0,
    };

    let desc_table = CommonTableRegister { base: 12, limit: 13 };

    let written = CommonSpecialRegisters {
        cs: code_seg,
        ds: data_seg,
        es: data_seg,
        fs: data_seg,
        gs: data_seg,
        ss: data_seg,
        tr: data_seg,
        ldt: data_seg,
        gdt: desc_table,
        idt: desc_table,
        cr0: 0x80000011, // bit 0 (PE) + bit 4 (ET) + bit 31 (PG)
        cr2: 2,
        cr3: 3,
        cr4: 0x20,
        cr8: 5,
        efer: 0x500,
        apic_base: 0xFEE00900,
        interrupt_bitmap: [0; 4],
    };

    vm.set_sregs(&written).unwrap();
    assert_eq!(written, vm.sregs().unwrap());
}
359+
360+
/// Allocate an [`ExclusiveSharedMemory`] whose size is `size` rounded up
/// to a whole number of pages, for use as a test mapping target.
#[cfg(any(kvm, mshv3))]
fn create_test_memory(size: usize) -> crate::mem::shared_mem::ExclusiveSharedMemory {
    use hyperlight_common::mem::PAGE_SIZE_USIZE;
    // Round up so a request smaller than a page still yields one full page.
    let rounded = size.div_ceil(PAGE_SIZE_USIZE) * PAGE_SIZE_USIZE;
    crate::mem::shared_mem::ExclusiveSharedMemory::new(rounded).unwrap()
}
367+
368+
/// Build a `MemoryRegion` describing `mem` as seen by the guest at
/// `guest_base`, with access `flags` and a Heap region type.
#[cfg(any(kvm, mshv3))]
fn region_for_test_memory(
    mem: &crate::mem::shared_mem::ExclusiveSharedMemory,
    guest_base: usize,
    flags: crate::mem::memory_region::MemoryRegionFlags,
) -> MemoryRegion {
    use crate::mem::memory_region::MemoryRegionType;
    use crate::mem::shared_mem::SharedMemory;

    let host_base = mem.base_addr();
    let size = mem.mem_size();
    MemoryRegion {
        host_region: host_base..(host_base + size),
        guest_region: guest_base..(guest_base + size),
        flags,
        region_type: MemoryRegionType::Heap,
    }
}
386+
387+
#[test]
#[cfg(any(kvm, mshv3))] // Requires memory mapping support (TODO on WHP)
fn map_memory() {
    use crate::mem::memory_region::MemoryRegionFlags;

    let mut vm = boxed_vm();

    let backing = create_test_memory(4096);
    let guest_addr: usize = 0x1000;
    let region = region_for_test_memory(
        &backing,
        guest_addr,
        MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
    );

    // SAFETY: `region` points at valid memory owned by `backing`, which
    // outlives every mapping made in this test, and slot 0 is not mapped.
    unsafe {
        vm.map_memory((0, &region)).unwrap();
    }

    // A mapped slot can be unmapped once...
    vm.unmap_memory((0, &region)).unwrap();

    // ...but unmapping it a second time must fail...
    vm.unmap_memory((0, &region)).unwrap_err();

    // ...as must unmapping a slot that was never mapped.
    vm.unmap_memory((99, &region)).unwrap_err();

    // The same region can be re-mapped into a different slot.
    // SAFETY: the backing memory is still alive and slot 1 is unused.
    unsafe {
        vm.map_memory((1, &region)).unwrap();
    }

    // A second, independent region in yet another slot.
    let backing2 = create_test_memory(4096);
    let guest_addr2: usize = 0x2000;
    let region2 = region_for_test_memory(
        &backing2,
        guest_addr2,
        MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
    );

    // SAFETY: `region2` is backed by the live `backing2`, and slot 2 is unused.
    unsafe {
        vm.map_memory((2, &region2)).unwrap();
    }

    // Clean up both live mappings.
    vm.unmap_memory((1, &region)).unwrap();
    vm.unmap_memory((2, &region2)).unwrap();
}
203442
}

0 commit comments

Comments
 (0)