@@ -15,6 +15,7 @@ use crate::validate_vtl_gpa_flags;
 use crate::GuestVsmState;
 use crate::GuestVsmVtl1State;
 use crate::GuestVtl;
+use crate::InitialVpContextOperation;
 use crate::TlbFlushLockAccess;
 use crate::VpStartEnableVtl;
 use crate::WakeReason;
@@ -936,12 +937,12 @@ impl<T, B: HardwareIsolatedBacking>
         }
 
         let target_vtl = self.target_vtl_no_higher(target_vtl)?;
-        let target_vp = &self.vp.partition.vps[target_vp as usize];
+        let target_vp_inner = self.vp.cvm_partition().vp_inner(target_vp);
 
         // The target VTL must have been enabled. In addition, if lower VTL
         // startup has been suppressed, then the request must be coming from a
         // secure VTL.
-        if target_vtl == GuestVtl::Vtl1 && !target_vp.hcvm_vtl1_state.lock().enabled {
+        if target_vtl == GuestVtl::Vtl1 && !*target_vp_inner.vtl1_enabled.lock() {
             return Err(HvError::InvalidVpState);
         }
 
@@ -952,7 +953,7 @@ impl<T, B: HardwareIsolatedBacking>
             .guest_vsm
             .read()
             .get_hardware_cvm()
-            .is_some_and(|inner| inner.deny_lower_vtl_startup)
+            .is_some_and(|state| state.deny_lower_vtl_startup)
         {
             return Err(HvError::AccessDenied);
         }
@@ -966,29 +967,27 @@ impl<T, B: HardwareIsolatedBacking>
         // becomes a problem. Note that this will not apply to non-hardware cvms
         // as this may regress existing VMs.
 
-        let mut target_vp_vtl1_state = target_vp.hcvm_vtl1_state.lock();
-        if self
-            .vp
-            .partition
-            .guest_vsm
-            .read()
-            .get_hardware_cvm()
-            .is_some()
-            && target_vp_vtl1_state.started
+        // After this check there can be no more failures, so try to
+        // atomically mark the VP as started here.
+        if target_vp_inner
+            .started
+            .compare_exchange(
+                false,
+                true,
+                std::sync::atomic::Ordering::Relaxed,
+                std::sync::atomic::Ordering::Relaxed,
+            )
+            .is_err()
         {
             return Err(HvError::InvalidVpState);
         }
 
-        // There can be no more errors from here, so just set started to
-        // true while holding the lock
-
-        target_vp_vtl1_state.started = true;
-
         let start_state = VpStartEnableVtl {
-            is_start: true,
+            operation: InitialVpContextOperation::StartVp,
            context: *vp_context,
         };
 
+        let target_vp = &self.vp.partition.vps[target_vp as usize];
         *target_vp.hv_start_enable_vtl_vp[target_vtl].lock() = Some(Box::new(start_state));
         target_vp.wake(target_vtl, WakeReason::HV_START_ENABLE_VP_VTL);
 
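
A note on the hunk above: the old code took the per-VP VTL 1 lock and then tested a plain `started` bool; the new code folds the check and the update into a single atomic operation. A minimal sketch of that pattern, assuming `started` is an `AtomicBool` on the per-VP inner state (names here are illustrative, not the crate's API):

    use std::sync::atomic::{AtomicBool, Ordering};

    struct VpInner {
        started: AtomicBool,
    }

    // Returns Err if some other caller already started this VP: the
    // compare_exchange only succeeds for the caller that observes `false`.
    // Relaxed ordering matches the diff; the flag is set once, never cleared.
    fn try_mark_started(vp: &VpInner) -> Result<(), ()> {
        vp.started
            .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
            .map(drop)
            .map_err(drop)
    }

This is why the hunk can drop the separate "set started while holding the lock" step: check and set are now one indivisible operation, so exactly one racing HvStartVirtualProcessor call wins.
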
@@ -1067,6 +1066,12 @@ impl<T, B: HardwareIsolatedBacking>
         vtl: Vtl,
         vp_context: &hvdef::hypercall::InitialVpContextX64,
     ) -> HvResult<()> {
+        tracing::debug!(
+            vp_index = self.vp.vp_index().index(),
+            target_vp = vp_index,
+            ?vtl,
+            "HvEnableVpVtl"
+        );
         if partition_id != hvdef::HV_PARTITION_ID_SELF {
             return Err(HvError::InvalidPartitionId);
         }
@@ -1097,7 +1102,7 @@ impl<T, B: HardwareIsolatedBacking>
         // the higher VTL has not been enabled on any other VP because at that
         // point, the higher VTL should be orchestrating its own enablement.
         if self.intercepted_vtl < GuestVtl::Vtl1 {
-            if vtl1_state.enabled_on_vp_count > 0 || vp_index != current_vp_index {
+            if vtl1_state.enabled_on_any_vp || vp_index != current_vp_index {
                 return Err(HvError::AccessDenied);
             }
 
@@ -1106,7 +1111,7 @@ impl<T, B: HardwareIsolatedBacking>
             // If handling on behalf of VTL 1, then some other VP (i.e. the
             // bsp) must have already handled EnableVpVtl. No partition-wide
             // state is changing, so no need to hold the lock
-            assert!(vtl1_state.enabled_on_vp_count > 0);
+            assert!(vtl1_state.enabled_on_any_vp);
             None
         }
     };
@@ -1120,7 +1125,7 @@ impl<T, B: HardwareIsolatedBacking>
             .vtl1_enabled
             .lock();
 
-        if inner_vtl1_state.enabled {
+        if *vtl1_enabled {
             return Err(HvError::VtlAlreadyEnabled);
         }
 
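
For context on the `vtl1_enabled` guard above and its use in the next hunk: the enable path checks the flag and later sets it under the same mutex guard, so two racing HvEnableVpVtl calls for one VP cannot both succeed. A minimal sketch of that shape (std `Mutex` for self-containment; names illustrative):

    use std::sync::Mutex;

    fn enable_vtl1(vtl1_enabled: &Mutex<bool>) -> Result<(), &'static str> {
        // The guard is held across both the check and the final store, so
        // the VtlAlreadyEnabled error goes to exactly one of two racers.
        let mut enabled = vtl1_enabled.lock().unwrap();
        if *enabled {
            return Err("VtlAlreadyEnabled");
        }
        // ... per-VP enablement work happens here, guard still held ...
        *enabled = true;
        Ok(())
    }
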
@@ -1153,13 +1158,16 @@ impl<T, B: HardwareIsolatedBacking>
 
         // Cannot fail from here
         if let Some(mut gvsm) = gvsm_state {
-            gvsm.get_hardware_cvm_mut().unwrap().enabled_on_vp_count += 1;
+            // It's valid to set this only when gvsm_state is Some (when VTL 0
+            // was intercepted) because we assert above that if VTL 1 was
+            // intercepted, some VP has already enabled VTL 1.
+            gvsm.get_hardware_cvm_mut().unwrap().enabled_on_any_vp = true;
         }
 
-        inner_vtl1_state.enabled = true;
+        *vtl1_enabled = true;
 
         let enable_vp_vtl_state = VpStartEnableVtl {
-            is_start: false,
+            operation: InitialVpContextOperation::EnableVpVtl,
             context: *vp_context,
         };
 
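
The rename from `enabled_on_vp_count` to `enabled_on_any_vp` in the hunks above reflects how the value was actually used: it was only ever compared against zero and never decremented, so a set-once bool expresses the same state without implying precise bookkeeping. Before/after sketch of the field (shape assumed from this diff alone):

    // Before: a counter that was only ever tested as `> 0`.
    struct Vtl1StateOld {
        enabled_on_vp_count: usize,
    }

    // After: a flag that is set once and never cleared.
    struct Vtl1StateNew {
        enabled_on_any_vp: bool,
    }
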
@@ -1493,7 +1501,8 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
         tracing::debug!(
             vp_index = self.inner.cpu_index,
             ?vtl,
-            "starting vp with initial registers"
+            ?start_enable_vtl_state.operation,
+            "setting up vp with initial registers"
         );
 
         hv1_emulator::hypercall::set_x86_vp_context(
@@ -1502,10 +1511,13 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
         )
         .map_err(UhRunVpError::State)?;
 
-        if start_enable_vtl_state.is_start {
+        if matches!(
+            start_enable_vtl_state.operation,
+            InitialVpContextOperation::StartVp
+        ) {
             match vtl {
                 GuestVtl::Vtl0 => {
-                    if self.inner.hcvm_vtl1_state.lock().enabled {
+                    if *self.cvm_vp_inner().vtl1_enabled.lock() {
                         // When starting a VP targeting VTL on a
                         // hardware confidential VM, if VTL 1 has been
                         // enabled, switch to it (the highest enabled
@@ -1516,20 +1528,17 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
                         // a second+ startvp call for a vp should be
                         // revisited.
                         //
-                        // For other VM types, the hypervisor is
-                        // responsible for running the correct VTL.
-                        //
                         // Furthermore, there is no need to copy the
                         // shared VTL registers if starting the VP on an
                         // already running VP is disallowed. Even if
                         // this was allowed, copying the registers may
                         // not be desirable.
 
-                        B::set_exit_vtl(self, GuestVtl::Vtl1);
+                        self.backing.cvm_state_mut().exit_vtl = GuestVtl::Vtl1;
                     }
                 }
                 GuestVtl::Vtl1 => {
-                    B::set_exit_vtl(self, GuestVtl::Vtl1);
+                    self.backing.cvm_state_mut().exit_vtl = GuestVtl::Vtl1;
                 }
             }
         }
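
The `is_start: bool` to `operation` change threads through all of the hunks above. The enum itself is defined elsewhere in the crate (note the new import in the first hunk); a plausible shape, inferred only from the two variants this diff constructs and matches on:

    /// Which hypercall supplied the initial VP context. (Assumed definition;
    /// the real one lives wherever `use crate::InitialVpContextOperation`
    /// points.)
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub enum InitialVpContextOperation {
        /// The context came from HvStartVirtualProcessor.
        StartVp,
        /// The context came from HvEnableVpVtl.
        EnableVpVtl,
    }

    // Call sites then test the operation with matches!, as in the hunk at
    // @@ -1502 above, instead of reading a bare bool:
    fn is_start(op: InitialVpContextOperation) -> bool {
        matches!(op, InitialVpContextOperation::StartVp)
    }

Compared with the old bool, the enum names the originating hypercall at every use site, shows up meaningfully in the `?start_enable_vtl_state.operation` debug log, and leaves room for further variants.
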
@@ -1571,6 +1580,11 @@ impl<B: HardwareIsolatedBacking> UhProcessor<'_, B> {
         target_vtl: GuestVtl,
         config: HvRegisterVsmVpSecureVtlConfig,
     ) -> Result<(), HvError> {
+        tracing::debug!(
+            ?requesting_vtl,
+            ?target_vtl,
+            "setting vsm vp secure config vtl"
+        );
         if requesting_vtl <= target_vtl {
             return Err(HvError::AccessDenied);
         }