CVM Guest VSM: refactor the shared/encrypted bitmaps to be partition-wide #1370

Merged
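This PR replaces the per-mapping shared/encrypted bitmaps on `GuestMemoryMapping` (the old `use_bitmap` / `check_bitmap` / `update_bitmap` surface) with partition-wide validity tracking: one tracker for encrypted pages and one for shared pages, shared by reference between the memory mappings and the `HardwareIsolatedMemoryProtector`. As a reading aid, here is a minimal sketch of the shape such a tracker could take, inferred only from the call sites in this diff (`update_valid`, `check_valid`); the real `GuestValidMemory` in `openhcl/underhill_mem/src/mapping.rs` differs in detail.

```rust
// Minimal sketch, not the actual implementation: one atomically updated
// bit per guest page number (GPN), recording whether that page is valid
// to access through this view (encrypted or shared) of guest memory.
use std::ops::Range;
use std::sync::atomic::{AtomicU64, Ordering};

pub struct GuestValidMemory {
    bitmap: Vec<AtomicU64>,
}

impl GuestValidMemory {
    pub fn new(page_count: usize) -> Self {
        Self {
            bitmap: (0..page_count.div_ceil(64)).map(|_| AtomicU64::new(0)).collect(),
        }
    }

    /// Marks every GPN in `gpns` valid (`true`) or invalid (`false`).
    /// (The real method takes a `MemoryRange` of byte addresses.)
    pub fn update_valid(&self, gpns: Range<u64>, valid: bool) {
        for gpn in gpns {
            let (word, bit) = ((gpn / 64) as usize, gpn % 64);
            if valid {
                self.bitmap[word].fetch_or(1 << bit, Ordering::SeqCst);
            } else {
                self.bitmap[word].fetch_and(!(1 << bit), Ordering::SeqCst);
            }
        }
    }

    /// Returns whether a single GPN is currently valid in this view.
    pub fn check_valid(&self, gpn: u64) -> bool {
        let (word, bit) = ((gpn / 64) as usize, gpn % 64);
        self.bitmap[word].load(Ordering::SeqCst) & (1 << bit) != 0
    }
}
```

Because the tracker is a standalone object rather than a field of one mapping, both `GuestMemoryMapping`s and the protector can hold an `Arc` to the same instance, which is how the diff below wires them together.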
63 changes: 41 additions & 22 deletions openhcl/underhill_mem/src/init.rs
@@ -6,6 +6,7 @@
use crate::HardwareIsolatedMemoryProtector;
use crate::MemoryAcceptor;
use crate::mapping::GuestMemoryMapping;
+use crate::mapping::GuestPartitionMemoryView;
use anyhow::Context;
use cvm_tracing::CVM_ALLOWED;
use futures::future::try_join_all;
@@ -208,13 +209,19 @@ pub async fn init(params: &Init<'_>) -> anyhow::Result<MemoryMappings> {
// Do not register this mapping with the kernel. It will not be safe for
// use with syscalls that expect virtual addresses to be in
// kernel-registered RAM.

tracing::debug!("Building valid encrypted memory view");
let encrypted_memory_view = {
let _span = tracing::info_span!("create encrypted memory view", CVM_ALLOWED).entered();
GuestPartitionMemoryView::new(params.mem_layout, true)?
};

tracing::debug!("Building VTL0 memory map");
let vtl0_mapping = Arc::new({
let _span = tracing::info_span!("map_vtl0_memory", CVM_ALLOWED).entered();
GuestMemoryMapping::builder(0)
-.dma_base_address(None) // prohibit direct DMA attempts until TDISP is supported
-.use_bitmap(Some(true))
-.build(&gpa_fd, params.mem_layout)
+.dma_base_address(None)
+.build_with_bitmap(&gpa_fd, &encrypted_memory_view)
.context("failed to map vtl0 memory")?
});
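The two views are built with opposite initial states: the encrypted view covers `params.mem_layout`, while the shared view below covers `params.complete_memory_layout` and is populated explicitly from the shared pool. Assuming the boolean argument to `GuestPartitionMemoryView::new` selects the initial validity (consistent with all guest RAM starting encrypted and nothing starting shared), a plausible shape for the view type, building on the sketch above, is:

```rust
use std::sync::Arc;

/// Hypothetical sketch: the view owns the partition-wide validity tracker
/// and hands out shared references to it, so a mapping and the memory
/// protector observe the same bitmap. The real constructor takes a
/// &MemoryLayout rather than a page count, and is fallible.
pub struct GuestPartitionMemoryView {
    valid_memory: Arc<GuestValidMemory>,
}

impl GuestPartitionMemoryView {
    /// `valid_initially` mirrors the boolean at the two call sites in
    /// init(): assumed true for the encrypted view, false for the shared.
    pub fn new(page_count: u64, valid_initially: bool) -> Self {
        let valid_memory = Arc::new(GuestValidMemory::new(page_count as usize));
        if valid_initially {
            valid_memory.update_valid(0..page_count, true);
        }
        Self { valid_memory }
    }

    /// The partition-wide validity tracker backing this view.
    pub fn partition_valid_memory(&self) -> &Arc<GuestValidMemory> {
        &self.valid_memory
    }
}
```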

@@ -281,26 +288,34 @@ pub async fn init(params: &Init<'_>) -> anyhow::Result<MemoryMappings> {
// confused about shared memory, and our current use of kernel-mode
// guest memory access is limited to low-perf paths where we can use
// bounce buffering.

tracing::debug!("Building shared memory map");

+let shared_memory_view = {
+let _span = tracing::info_span!("create shared memory view", CVM_ALLOWED).entered();
+GuestPartitionMemoryView::new(params.complete_memory_layout, false)?
+};

+let valid_shared_memory = shared_memory_view.partition_valid_memory();

+// Update the shared mapping bitmap for pages used by the shared
+// visibility pool to be marked as shared, since by default pages are
+// marked as no-access in the bitmap.
+tracing::debug!("Updating shared mapping bitmaps");
+for range in params.shared_pool {
+valid_shared_memory.as_ref().update_valid(range.range, true);
+}

let shared_mapping = Arc::new({
let _span = tracing::info_span!("map_shared_memory", CVM_ALLOWED).entered();
GuestMemoryMapping::builder(shared_offset)
.shared(true)
-.use_bitmap(Some(false))
.ignore_registration_failure(params.boot_init.is_none())
.dma_base_address(Some(dma_base_address))
-.build(&gpa_fd, params.complete_memory_layout)
+.build_with_bitmap(&gpa_fd, &shared_memory_view)
.context("failed to map shared memory")?
});

-// Update the shared mapping bitmap for pages used by the shared
-// visibility pool to be marked as shared, since by default pages are
-// marked as no-access in the bitmap.
-tracing::debug!("Updating shared mapping bitmaps");
-for range in params.shared_pool {
-shared_mapping.update_bitmap(range.range, true);
-}

tracing::debug!("Creating VTL0 guest memory");
let vtl0_gm = GuestMemory::new_multi_region(
"vtl0",
@@ -309,6 +324,7 @@ pub async fn init(params: &Init<'_>) -> anyhow::Result<MemoryMappings> {
)
.context("failed to make vtl0 guest memory")?;

tracing::debug!("Creating VTL1 guest memory");
let vtl1_gm = if params.maximum_vtl >= Vtl::Vtl1 {
// TODO CVM GUEST VSM: This should not just use the vtl0_gm. This
// could also be further tightened -- whether or not VTL 1 is
Expand Down Expand Up @@ -351,7 +367,8 @@ pub async fn init(params: &Init<'_>) -> anyhow::Result<MemoryMappings> {
let private_vtl0_memory = GuestMemory::new("trusted", vtl0_mapping.clone());

let protector = Arc::new(HardwareIsolatedMemoryProtector::new(
-shared_mapping.clone(),
+encrypted_memory_view.partition_valid_memory().clone(),
+valid_shared_memory.clone(),
vtl0_mapping.clone(),
params.mem_layout.clone(),
acceptor.as_ref().unwrap().clone(),
@@ -374,12 +391,13 @@ pub async fn init(params: &Init<'_>) -> anyhow::Result<MemoryMappings> {
let vtl0_mapping = {
let _span = tracing::info_span!("map_vtl0_memory", CVM_ALLOWED).entered();
let base_address = params.vtl0_alias_map_bit.unwrap_or(0);

Arc::new(
GuestMemoryMapping::builder(base_address)
.for_kernel_access(true)
.dma_base_address(Some(base_address))
.ignore_registration_failure(params.boot_init.is_none())
-.build(&gpa_fd, params.mem_layout)
+.build_without_bitmap(&gpa_fd, params.mem_layout)
.context("failed to map vtl0 memory")?,
)
};
Expand Down Expand Up @@ -410,13 +428,14 @@ pub async fn init(params: &Init<'_>) -> anyhow::Result<MemoryMappings> {
tracing::debug!("Creating VTL 1 memory map");

let _span = tracing::info_span!("map_vtl1_memory", CVM_ALLOWED).entered();
-let vtl1_mapping = GuestMemoryMapping::builder(0)
-.for_kernel_access(true)
-.dma_base_address(Some(0))
-.ignore_registration_failure(params.boot_init.is_none())
-.build(&gpa_fd, params.mem_layout)
-.context("failed to map vtl1 memory")?;
-Some(Arc::new(vtl1_mapping))
+Some(Arc::new(
+GuestMemoryMapping::builder(0)
+.for_kernel_access(true)
+.dma_base_address(Some(0))
+.ignore_registration_failure(params.boot_init.is_none())
+.build_without_bitmap(&gpa_fd, params.mem_layout)
+.context("failed to map vtl1 memory")?,
+))
}
} else {
None
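Across both files, the optional `use_bitmap(Some(..))` builder knob is replaced by two explicit terminal methods: `build_with_bitmap`, which ties the mapping to a partition-wide view, and `build_without_bitmap` for paths that never tracked validity (the alias-map and non-hardware-isolated cases). A simplified illustration of that split, with stand-in types; the real builder carries many more options (`for_kernel_access`, `dma_base_address`, registration handling) and both build methods are fallible:

```rust
use std::sync::Arc;

pub struct GuestMemoryMappingBuilder {
    base_address: u64,
}

pub struct GuestMemoryMapping {
    base_address: u64,
    /// Present only when page validity is tracked partition-wide.
    valid_memory: Option<Arc<GuestValidMemory>>,
}

impl GuestMemoryMappingBuilder {
    /// Back the mapping with the view's validity bitmap; replaces the
    /// old `.use_bitmap(Some(..)).build(..)` pattern.
    pub fn build_with_bitmap(self, view: &GuestPartitionMemoryView) -> GuestMemoryMapping {
        GuestMemoryMapping {
            base_address: self.base_address,
            valid_memory: Some(view.partition_valid_memory().clone()),
        }
    }

    /// A plain mapping with no validity bitmap.
    pub fn build_without_bitmap(self) -> GuestMemoryMapping {
        GuestMemoryMapping {
            base_address: self.base_address,
            valid_memory: None,
        }
    }
}
```

Making the caller pick one of two named methods removes the ambiguity of the old `Option<bool>` flag and lets the bitmap-backed path demand a view at compile time.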
32 changes: 18 additions & 14 deletions openhcl/underhill_mem/src/lib.rs
@@ -35,6 +35,7 @@ use hvdef::hypercall::AcceptMemoryType;
use hvdef::hypercall::HostVisibilityType;
use hvdef::hypercall::HvInputVtl;
use mapping::GuestMemoryMapping;
+use mapping::GuestValidMemory;
use memory_range::MemoryRange;
use parking_lot::Mutex;
use registrar::RegisterMemory;
@@ -384,7 +385,8 @@ struct HypercallOverlay {
}

struct HardwareIsolatedMemoryProtectorInner {
-shared: Arc<GuestMemoryMapping>,
+valid_encrypted: Arc<GuestValidMemory>,
+valid_shared: Arc<GuestValidMemory>,
encrypted: Arc<GuestMemoryMapping>,
default_vtl_permissions: DefaultVtlPermissions,
vtl1_protections_enabled: bool,
@@ -396,14 +398,16 @@ impl HardwareIsolatedMemoryProtector {
/// `valid_encrypted` and `valid_shared` track which pages of the partition
/// are currently valid to access as encrypted and shared, respectively.
/// `encrypted` provides the mapping for encrypted memory.
pub fn new(
-shared: Arc<GuestMemoryMapping>,
+valid_encrypted: Arc<GuestValidMemory>,
+valid_shared: Arc<GuestValidMemory>,
encrypted: Arc<GuestMemoryMapping>,
layout: MemoryLayout,
acceptor: Arc<MemoryAcceptor>,
) -> Self {
Self {
inner: Mutex::new(HardwareIsolatedMemoryProtectorInner {
-shared,
+valid_encrypted,
+valid_shared,
encrypted,
// Grant only VTL 0 all permissions. This will be altered
// later by VTL 1 enablement and by VTL 1 itself.
@@ -506,7 +510,7 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
let gpns = gpns
.iter()
.copied()
-.filter(|&gpn| inner.shared.check_bitmap(gpn) != shared)
+.filter(|&gpn| inner.valid_shared.check_valid(gpn) != shared)
.collect::<Vec<_>>();

tracing::debug!(
@@ -526,12 +530,12 @@

// Prevent accesses via the wrong address.
let clear_bitmap = if shared {
-&inner.encrypted
+&inner.valid_encrypted
} else {
-&inner.shared
+&inner.valid_shared
};
for &range in &ranges {
-clear_bitmap.update_bitmap(range, false);
+clear_bitmap.update_valid(range, false);
}

// There may be other threads concurrently accessing these pages. We
@@ -607,7 +611,7 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
.expect("previous gpns was already checked");

for &range in &rollback_ranges {
-clear_bitmap.update_bitmap(range, true);
+clear_bitmap.update_valid(range, true);
}

// Figure out the index of the gpn that failed, in the
Expand Down Expand Up @@ -639,12 +643,12 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {

// Allow accesses via the correct address.
let set_bitmap = if shared {
-&inner.shared
+&inner.valid_shared
} else {
-&inner.encrypted
+&inner.valid_encrypted
};
for &range in &ranges {
-set_bitmap.update_bitmap(range, true);
+set_bitmap.update_valid(range, true);
}

if !shared {
Expand Down Expand Up @@ -697,7 +701,7 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {

// Set GPN sharing status in output.
for (gpn, host_vis) in gpns.iter().zip(host_visibility.iter_mut()) {
-*host_vis = if inner.shared.check_bitmap(*gpn) {
+*host_vis = if inner.valid_shared.check_valid(*gpn) {
HostVisibilityType::SHARED
} else {
HostVisibilityType::PRIVATE
Expand Down Expand Up @@ -741,7 +745,7 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
// find all accepted memory. When lazy acceptance exists,
// this should track all pages that have been accepted and
// should be used instead.
-if !inner.encrypted.check_bitmap(gpn) {
+if !inner.valid_encrypted.check_valid(gpn) {
if page_count > 0 {
let end_address = protect_start + (page_count * PAGE_SIZE as u64);
ranges.push(MemoryRange::new(protect_start..end_address));
@@ -795,7 +799,7 @@ impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
let inner = self.inner.lock();

// Protections cannot be applied to a host-visible page
-if gpns.iter().any(|&gpn| inner.shared.check_bitmap(gpn)) {
+if gpns.iter().any(|&gpn| inner.valid_shared.check_valid(gpn)) {
return Err((HvError::OperationDenied, 0));
}

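Taken together, the lib.rs changes keep the invariant the original comments describe: when host visibility changes, the bitmap for the view being left is cleared first, the visibility change happens next (with a rollback of the cleared bits if it partially fails), and only then is the bitmap for the view being entered set. A condensed sketch of that sequence, reusing the `GuestValidMemory` sketch above; `flip_visibility` is a hypothetical stand-in for the hypercall/acceptor work the diff elides:

```rust
use std::ops::Range;

fn change_visibility(
    valid_encrypted: &GuestValidMemory,
    valid_shared: &GuestValidMemory,
    gpns: Range<u64>,
    shared: bool,
    flip_visibility: impl Fn(&Range<u64>) -> Result<(), ()>,
) -> Result<(), ()> {
    // 1. Prevent accesses via the wrong address: clear the view being
    //    left, so a page is never valid in both views at once.
    let (clear, set) = if shared {
        (valid_encrypted, valid_shared)
    } else {
        (valid_shared, valid_encrypted)
    };
    clear.update_valid(gpns.clone(), false);

    // 2. Change visibility with the host; on failure, roll back the bits
    //    we cleared so the old view stays usable.
    if flip_visibility(&gpns).is_err() {
        clear.update_valid(gpns, true);
        return Err(());
    }

    // 3. Only now allow accesses via the correct address.
    set.update_valid(gpns, true);
    Ok(())
}
```

A concurrent accessor can observe a page as temporarily invalid in both views during the transition, but never as valid in both, which is exactly the confusion the partition-wide bitmaps exist to rule out.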