
Commit 2547a0e

move started to UhCvmVpInner
1 parent 9683856 commit 2547a0e

6 files changed: +67 -104 lines changed

Diff for: openhcl/virt_mshv_vtl/src/lib.rs (+18 -8)

@@ -346,8 +346,6 @@ pub struct UhCvmVpState {
     hv: VtlArray<ProcessorVtlHv, 2>,
     /// LAPIC state.
     lapics: VtlArray<LapicState, 2>,
-    vtl1_enabled: Mutex<bool>,
-    vp_started: bool,
 }
 
 #[cfg(guest_arch = "x86_64")]
@@ -384,8 +382,6 @@ impl UhCvmVpState {
             exit_vtl: GuestVtl::Vtl0,
             hv,
             lapics,
-            vtl1_enabled: Mutex::new(false),
-            vp_started: false,
         }
     }
 }
@@ -424,6 +420,8 @@ pub struct UhCvmVpInner {
     tlb_lock_info: VtlArray<TlbLockInfo, 2>,
     /// Whether VTL 1 has been enabled on the vp
    vtl1_enabled: Mutex<bool>,
+    /// Whether the VP has been started via the StartVp hypercall.
+    started: AtomicBool,
 }
 
 #[cfg_attr(guest_arch = "aarch64", expect(dead_code))]
@@ -499,7 +497,7 @@ struct VbsIsolatedVtl1State {
 #[derive(Clone, Copy, Default, Inspect)]
 struct HardwareCvmVtl1State {
     /// Whether VTL 1 has been enabled on any vp
-    enabled_on_vp_count: u32,
+    enabled_on_any_vp: bool,
     /// Whether guest memory should be zeroed before it resets.
     zero_memory_on_reset: bool,
     /// Whether a vp can be started or reset by a lower vtl.
@@ -654,12 +652,23 @@ struct UhVpInner {
     sidecar_exit_reason: Mutex<Option<SidecarExitReason>>,
 }
 
+#[cfg_attr(not(guest_arch = "x86_64"), allow(dead_code))]
+#[derive(Debug, Inspect)]
+/// Which operation is setting the initial vp context
+pub enum InitialVpContextOperation {
+    /// The VP is being started via the StartVp hypercall.
+    StartVp,
+    /// The VP is being started via the EnableVpVtl hypercall.
+    EnableVpVtl,
+}
+
 #[cfg_attr(not(guest_arch = "x86_64"), allow(dead_code))]
 #[derive(Debug, Inspect)]
 /// State for handling StartVp/EnableVpVtl hypercalls.
 pub struct VpStartEnableVtl {
-    /// True if the context is for startvp, false for enablevpvtl
-    is_start: bool,
+    /// Which operation, startvp or enablevpvtl, is setting the initial vp
+    /// context
+    operation: InitialVpContextOperation,
     #[inspect(skip)]
     context: hvdef::hypercall::InitialVpContextX64,
 }
@@ -1872,9 +1881,10 @@ impl UhProtoPartition<'_> {
     ) -> Result<UhCvmPartitionState, Error> {
         let vp_count = params.topology.vp_count() as usize;
         let vps = (0..vp_count)
-            .map(|_vp_index| UhCvmVpInner {
+            .map(|vp_index| UhCvmVpInner {
                 tlb_lock_info: VtlArray::from_fn(|_| TlbLockInfo::new(vp_count)),
                 vtl1_enabled: Mutex::new(false),
+                started: AtomicBool::new(vp_index == 0),
             })
             .collect();
         let tlb_locked_vps =
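
Note: the hunks above replace the old `is_start: bool` with the `InitialVpContextOperation` enum and move the per-VP `started` flag into `UhCvmVpInner` as an `AtomicBool` that is seeded to `true` only for VP 0 (the BSP). A minimal, self-contained sketch of that pattern follows; the `VpContextOp`, `VpInner`, and `make_vps` names are hypothetical stand-ins, not identifiers from this tree.

use std::sync::atomic::{AtomicBool, Ordering};

// Hypothetical stand-in for the enum this commit introduces: naming the
// operation reads better and extends more cleanly than `is_start: bool`.
#[derive(Debug, Clone, Copy)]
enum VpContextOp {
    StartVp,
    EnableVpVtl,
}

// Hypothetical per-VP state: only VP 0 (the BSP) is considered started at
// partition creation; application processors wait for a StartVp hypercall.
struct VpInner {
    started: AtomicBool,
}

fn make_vps(vp_count: usize) -> Vec<VpInner> {
    (0..vp_count)
        .map(|vp_index| VpInner {
            started: AtomicBool::new(vp_index == 0),
        })
        .collect()
}

fn main() {
    let vps = make_vps(4);
    assert!(vps[0].started.load(Ordering::Relaxed)); // the BSP starts out started
    assert!(!vps[1].started.load(Ordering::Relaxed)); // APs do not

    // Only StartVp (not EnableVpVtl) later switches the exit VTL.
    let op = VpContextOp::StartVp;
    assert!(matches!(op, VpContextOp::StartVp));
}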

Diff for: openhcl/virt_mshv_vtl/src/processor/hardware_cvm/mod.rs (+46 -32)

@@ -15,6 +15,7 @@ use crate::validate_vtl_gpa_flags;
 use crate::GuestVsmState;
 use crate::GuestVsmVtl1State;
 use crate::GuestVtl;
+use crate::InitialVpContextOperation;
 use crate::TlbFlushLockAccess;
 use crate::VpStartEnableVtl;
 use crate::WakeReason;
@@ -936,12 +937,12 @@ impl<T, B: HardwareIsolatedBacking>
        }
 
        let target_vtl = self.target_vtl_no_higher(target_vtl)?;
-        let target_vp = &self.vp.partition.vps[target_vp as usize];
+        let target_vp_inner = self.vp.cvm_partition().vp_inner(target_vp);
 
        // The target VTL must have been enabled. In addition, if lower VTL
        // startup has been suppressed, then the request must be coming from a
        // secure VTL.
-        if target_vtl == GuestVtl::Vtl1 && !target_vp.hcvm_vtl1_state.lock().enabled {
+        if target_vtl == GuestVtl::Vtl1 && !*target_vp_inner.vtl1_enabled.lock() {
            return Err(HvError::InvalidVpState);
        }
 
@@ -952,7 +953,7 @@ impl<T, B: HardwareIsolatedBacking>
            .guest_vsm
            .read()
            .get_hardware_cvm()
-            .is_some_and(|inner| inner.deny_lower_vtl_startup)
+            .is_some_and(|state| state.deny_lower_vtl_startup)
        {
            return Err(HvError::AccessDenied);
        }
@@ -966,29 +967,27 @@ impl<T, B: HardwareIsolatedBacking>
        // becomes a problem. Note that this will not apply to non-hardware cvms
        // as this may regress existing VMs.
 
-        let mut target_vp_vtl1_state = target_vp.hcvm_vtl1_state.lock();
-        if self
-            .vp
-            .partition
-            .guest_vsm
-            .read()
-            .get_hardware_cvm()
-            .is_some()
-            && target_vp_vtl1_state.started
+        // After this check, there can be no more failures, so try setting the
+        // fact that the VM started to true here.
+        if target_vp_inner
+            .started
+            .compare_exchange(
+                false,
+                true,
+                std::sync::atomic::Ordering::Relaxed,
+                std::sync::atomic::Ordering::Relaxed,
+            )
+            .is_err()
        {
            return Err(HvError::InvalidVpState);
        }
 
-        // There can be no more errors from here, so just set started to
-        // true while holding the lock
-
-        target_vp_vtl1_state.started = true;
-
        let start_state = VpStartEnableVtl {
-            is_start: true,
+            operation: InitialVpContextOperation::StartVp,
            context: *vp_context,
        };
 
+        let target_vp = &self.vp.partition.vps[target_vp as usize];
        *target_vp.hv_start_enable_vtl_vp[target_vtl].lock() = Some(Box::new(start_state));
        target_vp.wake(target_vtl, WakeReason::HV_START_ENABLE_VP_VTL);
 
@@ -1067,6 +1066,12 @@ impl<T, B: HardwareIsolatedBacking>
        vtl: Vtl,
        vp_context: &hvdef::hypercall::InitialVpContextX64,
    ) -> HvResult<()> {
+        tracing::debug!(
+            vp_index = self.vp.vp_index().index(),
+            target_vp = vp_index,
+            ?vtl,
+            "HvEnableVpVtl"
+        );
        if partition_id != hvdef::HV_PARTITION_ID_SELF {
            return Err(HvError::InvalidPartitionId);
        }
@@ -1097,7 +1102,7 @@ impl<T, B: HardwareIsolatedBacking>
        // the higher VTL has not been enabled on any other VP because at that
        // point, the higher VTL should be orchestrating its own enablement.
        if self.intercepted_vtl < GuestVtl::Vtl1 {
-            if vtl1_state.enabled_on_vp_count > 0 || vp_index != current_vp_index {
+            if vtl1_state.enabled_on_any_vp || vp_index != current_vp_index {
                return Err(HvError::AccessDenied);
            }
 
@@ -1106,7 +1111,7 @@ impl<T, B: HardwareIsolatedBacking>
            // If handling on behalf of VTL 1, then some other VP (i.e. the
            // bsp) must have already handled EnableVpVtl. No partition-wide
            // state is changing, so no need to hold the lock
-            assert!(vtl1_state.enabled_on_vp_count > 0);
+            assert!(vtl1_state.enabled_on_any_vp);
            None
        }
    };
@@ -1120,7 +1125,7 @@ impl<T, B: HardwareIsolatedBacking>
            .vtl1_enabled
            .lock();
 
-        if inner_vtl1_state.enabled {
+        if *vtl1_enabled {
            return Err(HvError::VtlAlreadyEnabled);
        }
 
@@ -1153,13 +1158,16 @@ impl<T, B: HardwareIsolatedBacking>
 
        // Cannot fail from here
        if let Some(mut gvsm) = gvsm_state {
-            gvsm.get_hardware_cvm_mut().unwrap().enabled_on_vp_count += 1;
+            // It's valid to only set this when gvsm_state is Some (when VTL 0
+            // was intercepted) only because we assert above that if VTL 1 was
+            // intercepted, some vp has already enabled VTL 1 on it.
+            gvsm.get_hardware_cvm_mut().unwrap().enabled_on_any_vp = true;
        }
 
-        inner_vtl1_state.enabled = true;
+        *vtl1_enabled = true;
 
        let enable_vp_vtl_state = VpStartEnableVtl {
-            is_start: false,
+            operation: InitialVpContextOperation::EnableVpVtl,
            context: *vp_context,
        };
 
@@ -1493,7 +1501,8 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
        tracing::debug!(
            vp_index = self.inner.cpu_index,
            ?vtl,
-            "starting vp with initial registers"
+            ?start_enable_vtl_state.operation,
+            "setting up vp with initial registers"
        );
 
        hv1_emulator::hypercall::set_x86_vp_context(
@@ -1502,10 +1511,13 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
        )
        .map_err(UhRunVpError::State)?;
 
-        if start_enable_vtl_state.is_start {
+        if matches!(
+            start_enable_vtl_state.operation,
+            InitialVpContextOperation::StartVp
+        ) {
            match vtl {
                GuestVtl::Vtl0 => {
-                    if self.inner.hcvm_vtl1_state.lock().enabled {
+                    if *self.cvm_vp_inner().vtl1_enabled.lock() {
                        // When starting a VP targeting VTL on a
                        // hardware confidential VM, if VTL 1 has been
                        // enabled, switch to it (the highest enabled
@@ -1516,20 +1528,17 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
                        // a second+ startvp call for a vp should be
                        // revisited.
                        //
-                        // For other VM types, the hypervisor is
-                        // responsible for running the correct VTL.
-                        //
                        // Furthermore, there is no need to copy the
                        // shared VTL registers if starting the VP on an
                        // already running VP is disallowed. Even if
                        // this was allowed, copying the registers may
                        // not be desirable.
 
-                        B::set_exit_vtl(self, GuestVtl::Vtl1);
+                        self.backing.cvm_state_mut().exit_vtl = GuestVtl::Vtl1;
                    }
                }
                GuestVtl::Vtl1 => {
-                    B::set_exit_vtl(self, GuestVtl::Vtl1);
+                    self.backing.cvm_state_mut().exit_vtl = GuestVtl::Vtl1;
                }
            }
        }
@@ -1571,6 +1580,11 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
        target_vtl: GuestVtl,
        config: HvRegisterVsmVpSecureVtlConfig,
    ) -> Result<(), HvError> {
+        tracing::debug!(
+            ?requesting_vtl,
+            ?target_vtl,
+            "setting vsm vp secure config vtl"
+        );
        if requesting_vtl <= target_vtl {
            return Err(HvError::AccessDenied);
        }
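
Note: the StartVp path above now claims the started flag with a single `compare_exchange` instead of taking the old per-VP lock, so only the first StartVp for a VP can succeed and every later one gets `HvError::InvalidVpState`. A minimal sketch of that claim-once pattern follows; the `try_claim_start` helper is hypothetical and not part of this tree.

use std::sync::atomic::{AtomicBool, Ordering};

// Only the caller that flips `started` from false to true wins the exchange;
// all later callers see it fail and report an error. Relaxed ordering is
// enough in this sketch because the flag itself is the only state guarded.
fn try_claim_start(started: &AtomicBool) -> Result<(), &'static str> {
    started
        .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
        .map(|_| ())
        .map_err(|_| "vp already started")
}

fn main() {
    let started = AtomicBool::new(false);
    assert!(try_claim_start(&started).is_ok()); // first StartVp succeeds
    assert!(try_claim_start(&started).is_err()); // any later StartVp is rejected
}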

Diff for: openhcl/virt_mshv_vtl/src/processor/mod.rs (-20)

@@ -36,7 +36,6 @@ use super::UhPartitionInner;
 use super::UhVpInner;
 use crate::GuestVsmState;
 use crate::GuestVtl;
-// use crate::UhVpCvmVtl1State;
 use crate::WakeReason;
 use hcl::ioctl;
 use hcl::ioctl::ProcessorRunner;
@@ -241,17 +240,6 @@ mod private {
        /// This is used for hypervisor-managed and untrusted SINTs.
        fn request_untrusted_sint_readiness(this: &mut UhProcessor<'_, Self>, sints: u16);
 
-        // /// Copies shared registers (per VSM TLFS spec) from the source VTL to
-        // /// the target VTL that will become active, and marks the target_vtl as
-        // /// the exit vtl.
-        // fn switch_vtl(
-        //     _this: &mut UhProcessor<'_, Self>,
-        //     _source_vtl: GuestVtl,
-        //     _target_vtl: GuestVtl,
-        // ) {
-        //     unimplemented!("switch_vtl not implemented");
-        // }
-
        /// Checks interrupt status for all VTLs, and handles cross VTL interrupt preemption and VINA.
        /// Returns whether interrupt reprocessing is required.
        fn handle_cross_vtl_interrupts(
@@ -303,10 +291,6 @@ pub trait HardwareIsolatedBacking: Backing {
        this: &UhProcessor<'_, Self>,
        vtl: GuestVtl,
    ) -> TranslationRegisters;
-    /// Sets the exit vtl
-    fn set_exit_vtl(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl) {
-        this.backing.cvm_state_mut().exit_vtl = vtl;
-    }
 }
 
 #[cfg_attr(guest_arch = "aarch64", allow(dead_code))]
@@ -364,10 +348,6 @@ impl UhVpInner {
            waker: Default::default(),
            cpu_index,
            vp_info,
-            // hcvm_vtl1_state: Mutex::new(UhVpCvmVtl1State {
-            //     enabled: false,
-            //     started: cpu_index == 0,
-            // }),
            hv_start_enable_vtl_vp: VtlArray::from_fn(|_| Mutex::new(None)),
            sidecar_exit_reason: Default::default(),
        }

Diff for: openhcl/virt_mshv_vtl/src/processor/mshv/arm64.rs (-4)

@@ -196,10 +196,6 @@ impl BackingPrivate for HypervisorBackedArm64 {
            .set_sints(this.backing.next_deliverability_notifications.sints() | sints);
    }
 
-    fn set_exit_vtl(_this: &mut UhProcessor<'_, Self>, _vtl: GuestVtl) {
-        unimplemented!();
-    }
-
    fn handle_cross_vtl_interrupts(
        _this: &mut UhProcessor<'_, Self>,
        _dev: &impl CpuIo,

Diff for: openhcl/virt_mshv_vtl/src/processor/snp/mod.rs (-36)

@@ -514,9 +514,6 @@ impl BackingPrivate for SnpBacked {
    fn vtl1_inspectable(this: &UhProcessor<'_, Self>) -> bool {
        this.hcvm_vtl1_inspectable()
    }
-    // fn set_exit_vtl(this: &mut UhProcessor<'_, Self>, vtl: GuestVtl) {
-    //     this.backing.cvm_state_mut().exit_vtl = vtl;
-    // }
 }
 
 fn virt_seg_to_snp(val: SegmentRegister) -> SevSelector {
@@ -2201,39 +2198,6 @@ impl<T: CpuIo> hv1_hypercall::EnablePartitionVtl for UhHypercallHandler<'_, '_,
    }
 }
 
-impl<T: CpuIo> hv1_hypercall::EnableVpVtl<hvdef::hypercall::InitialVpContextX64>
-    for UhHypercallHandler<'_, '_, T, SnpBacked>
-{
-    fn enable_vp_vtl(
-        &mut self,
-        partition_id: u64,
-        vp_index: u32,
-        vtl: Vtl,
-        vp_context: &hvdef::hypercall::InitialVpContextX64,
-    ) -> hvdef::HvResult<()> {
-        self.hcvm_enable_vp_vtl(partition_id, vp_index, vtl, vp_context)
-    }
-}
-
-impl<T: CpuIo> hv1_hypercall::RetargetDeviceInterrupt for UhHypercallHandler<'_, '_, T, SnpBacked> {
-    fn retarget_interrupt(
-        &mut self,
-        device_id: u64,
-        address: u64,
-        data: u32,
-        params: &hv1_hypercall::HvInterruptParameters<'_>,
-    ) -> hvdef::HvResult<()> {
-        self.hcvm_retarget_interrupt(
-            device_id,
-            address,
-            data,
-            params.vector,
-            params.multicast,
-            params.target_processors,
-        )
-    }
-}
-
 impl<T: CpuIo> hv1_hypercall::VtlSwitchOps for UhHypercallHandler<'_, '_, T, SnpBacked> {
    fn advance_ip(&mut self) {
        let is_64bit = self.vp.long_mode(self.intercepted_vtl);
