@@ -181,6 +181,36 @@ pub(crate) trait Vm: Debug + Send {
 
 #[cfg(test)]
 mod tests {
+    use super::*;
+    use crate::hypervisor::regs::{CommonSegmentRegister, CommonTableRegister};
+
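+    /// Builds a boxed `Vm` backed by whichever hypervisor is available on the host.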
+    fn boxed_vm() -> Box<dyn Vm> {
+        let available_vm = get_available_hypervisor().as_ref().unwrap();
+        match available_vm {
+            #[cfg(kvm)]
+            HypervisorType::Kvm => {
+                use crate::hypervisor::vm::kvm::KvmVm;
+                Box::new(KvmVm::new().unwrap())
+            }
+            #[cfg(mshv3)]
+            HypervisorType::Mshv => {
+                use crate::hypervisor::vm::mshv::MshvVm;
+                Box::new(MshvVm::new().unwrap())
+            }
+            #[cfg(target_os = "windows")]
+            HypervisorType::Whp => {
+                use crate::hypervisor::vm::whp::WhpVm;
+                use crate::hypervisor::wrappers::HandleWrapper;
+                use crate::mem::shared_mem::{ExclusiveSharedMemory, SharedMemory};
+                use hyperlight_common::mem::PAGE_SIZE_USIZE;
+
+                let mem_size = PAGE_SIZE_USIZE;
+                let shared_mem = ExclusiveSharedMemory::new(mem_size).unwrap();
+                let handle = HandleWrapper::from(shared_mem.get_mmap_file_handle());
+                Box::new(WhpVm::new(handle, shared_mem.raw_mem_size()).unwrap())
+            }
+        }
+    }
 
     #[test]
     // TODO: add support for testing on WHP
@@ -200,4 +230,212 @@ mod tests {
             }
         }
     }
+
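+    // Round-trips the general-purpose registers through set_regs/regs and checks they survive unchanged.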
+    #[test]
+    fn regs() {
+        let vm = boxed_vm();
+
+        let regs = CommonRegisters {
+            rax: 1,
+            rbx: 2,
+            rcx: 3,
+            rdx: 4,
+            rsi: 5,
+            rdi: 6,
+            rsp: 7,
+            rbp: 8,
+            r8: 9,
+            r9: 10,
+            r10: 11,
+            r11: 12,
+            r12: 13,
+            r13: 14,
+            r14: 15,
+            r15: 16,
+            rip: 17,
+            rflags: 0x2, // bit 1 is reserved and must always be set
+        };
+
+        vm.set_regs(&regs).unwrap();
+        let read_regs = vm.regs().unwrap();
+        assert_eq!(regs, read_regs);
+    }
+
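+    // Round-trips the FPU/SSE state through set_fpu/fpu.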
+    #[test]
+    fn fpu() {
+        let vm = boxed_vm();
+
+        // x87 FPU registers are 80-bit (10 bytes), stored in 16-byte slots for alignment.
+        // Only the first 10 bytes are preserved; the remaining 6 bytes are reserved/zeroed.
+        // See Intel® 64 and IA-32 Architectures SDM, Vol. 1, Sec. 10.5.1.1 (x87 State).
+        let fpr_entry: [u8; 16] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0];
+        let fpu = CommonFpu {
+            fpr: [fpr_entry; 8],
+            fcw: 2,
+            fsw: 3,
+            ftwx: 4,
+            last_opcode: 5,
+            last_ip: 6,
+            last_dp: 7,
+            xmm: [[8; 16]; 16],
+            mxcsr: 9,
+            pad1: 0,
+            pad2: 0,
+        };
+        vm.set_fpu(&fpu).unwrap();
+        #[cfg_attr(not(kvm), allow(unused_mut))]
+        let mut read_fpu = vm.fpu().unwrap();
+        #[cfg(kvm)]
+        {
+            read_fpu.mxcsr = fpu.mxcsr; // KVM get/set FPU does not preserve mxcsr
+        }
+        assert_eq!(fpu, read_fpu);
+    }
+
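+    // Round-trips the special registers (segments, descriptor tables, control registers) through set_sregs/sregs.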
+    #[test]
+    fn sregs() {
+        let vm = boxed_vm();
+
+        let segment = CommonSegmentRegister {
+            base: 1,
+            limit: 2,
+            selector: 3,
+            type_: 3,
+            present: 1,
+            dpl: 1,
+            db: 0,
+            s: 1,
+            l: 1,
+            g: 1,
+            avl: 1,
+            unusable: 0,
+            padding: 0,
+        };
+
+        let cs_segment = CommonSegmentRegister {
+            base: 1,
+            limit: 0xFFFF,
+            selector: 0x08,
+            type_: 0b1011, // code segment, execute/read, accessed
+            present: 1,
+            dpl: 1,
+            db: 0, // must be 0 in 64-bit mode
+            s: 1,
+            l: 1, // 64-bit mode
+            g: 1,
+            avl: 1,
+            unusable: 0,
+            padding: 0,
+        };
+        let table = CommonTableRegister {
+            base: 12,
+            limit: 13,
+        };
+        let sregs = CommonSpecialRegisters {
+            cs: cs_segment,
+            ds: segment,
+            es: segment,
+            fs: segment,
+            gs: segment,
+            ss: segment,
+            tr: segment,
+            ldt: segment,
+            gdt: table,
+            idt: table,
+            cr0: 0x80000011, // bit 0 (PE) + bit 4 (ET) + bit 31 (PG)
+            cr2: 2,
+            cr3: 3,
+            cr4: 0x20, // bit 5 (PAE)
+            cr8: 5,
+            efer: 0x500, // bit 8 (LME) + bit 10 (LMA)
+            apic_base: 0xFEE00900, // default base + bit 8 (BSP) + bit 11 (APIC global enable)
+            interrupt_bitmap: [0; 4],
+        };
+        vm.set_sregs(&sregs).unwrap();
+        let read_sregs = vm.sregs().unwrap();
+        assert_eq!(sregs, read_sregs);
+    }
+
+    /// Helper to create a page-aligned memory region for testing
+    #[cfg(any(kvm, mshv3))]
+    fn create_test_memory(size: usize) -> crate::mem::shared_mem::ExclusiveSharedMemory {
+        use hyperlight_common::mem::PAGE_SIZE_USIZE;
+        let aligned_size = size.div_ceil(PAGE_SIZE_USIZE) * PAGE_SIZE_USIZE;
+        crate::mem::shared_mem::ExclusiveSharedMemory::new(aligned_size).unwrap()
+    }
+
+    /// Helper to create a MemoryRegion from ExclusiveSharedMemory
+    #[cfg(any(kvm, mshv3))]
+    fn region_for_test_memory(
+        mem: &crate::mem::shared_mem::ExclusiveSharedMemory,
+        guest_base: usize,
+        flags: crate::mem::memory_region::MemoryRegionFlags,
+    ) -> MemoryRegion {
+        use crate::mem::memory_region::MemoryRegionType;
+        use crate::mem::shared_mem::SharedMemory;
+        let ptr = mem.base_addr();
+        let len = mem.mem_size();
+        MemoryRegion {
+            host_region: ptr..(ptr + len),
+            guest_region: guest_base..(guest_base + len),
+            flags,
+            region_type: MemoryRegionType::Heap,
+        }
+    }
+
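+    // Exercises map_memory/unmap_memory slot bookkeeping: mapping, unmapping, double-unmap and
+    // unknown-slot failures, re-mapping to a new slot, and mapping a second region alongside it.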
+    #[test]
+    #[cfg(any(kvm, mshv3))] // Requires memory mapping support (TODO on WHP)
+    fn map_memory() {
+        use crate::mem::memory_region::MemoryRegionFlags;
+
+        let mut vm = boxed_vm();
+
+        let mem1 = create_test_memory(4096);
+        let guest_addr: usize = 0x1000;
+        let region = region_for_test_memory(
+            &mem1,
+            guest_addr,
+            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
+        );
+
+        // SAFETY: The memory region points to valid memory allocated by ExclusiveSharedMemory,
+        // and will live until we drop mem1 at the end of the test.
+        // Slot 0 is not already mapped.
+        unsafe {
+            vm.map_memory((0, &region)).unwrap();
+        }
+
+        // Unmap the region
+        vm.unmap_memory((0, &region)).unwrap();
+
+        // Unmapping a region that was already unmapped should fail
+        vm.unmap_memory((0, &region)).unwrap_err();
+
+        // Unmapping a region that was never mapped should fail
+        vm.unmap_memory((99, &region)).unwrap_err();
+
+        // Re-map the same region to a different slot
+        // SAFETY: Same as above - memory is still valid and slot 1 is not mapped.
+        unsafe {
+            vm.map_memory((1, &region)).unwrap();
+        }
+
+        // Map a second region to a different slot
+        let mem2 = create_test_memory(4096);
+        let guest_addr2: usize = 0x2000;
+        let region2 = region_for_test_memory(
+            &mem2,
+            guest_addr2,
+            MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
+        );
+
+        // SAFETY: Memory is valid from ExclusiveSharedMemory, slot 2 is not mapped.
+        unsafe {
+            vm.map_memory((2, &region2)).unwrap();
+        }
+
+        // Clean up: unmap both regions
+        vm.unmap_memory((1, &region)).unwrap();
+        vm.unmap_memory((2, &region2)).unwrap();
+    }
 }