| 1 | + |
| 2 | +use std::mem::ManuallyDrop; |
| 3 | +use std::num::Wrapping; |
| 4 | + |
| 5 | +use vm_memory::{FileOffset, GuestMemoryRegion, MemoryRegionAddress, MmapRegion}; |
| 6 | + |
| 7 | +use super::*; |
| 8 | + |
| 9 | +/// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real |
| 10 | +/// `GuestMemoryMmap`, which manages a list of regions and then does a binary |
| 11 | +/// search to determine which region a specific read or write request goes to, |
| 12 | +/// this only uses a single region. Eliminating this binary search significantly |
| 13 | +/// speeds up all queue proofs, because it eliminates the only loop contained herein, |
| 14 | +/// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally, |
| 15 | +/// it works identically to `GuestMemoryMmap` with only a single contained region. |
| 16 | +pub struct SingleRegionGuestMemory { |
| 17 | + the_region: vm_memory::GuestRegionMmap, |
| 18 | +} |
| 19 | + |
| 20 | +impl GuestMemory for SingleRegionGuestMemory { |
| 21 | + type R = vm_memory::GuestRegionMmap; |
| 22 | + |
| 23 | + fn num_regions(&self) -> usize { |
| 24 | + 1 |
| 25 | + } |
| 26 | + |
| 27 | + fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> { |
| 28 | + self.the_region |
| 29 | + .to_region_addr(addr) |
| 30 | + .map(|_| &self.the_region) |
| 31 | + } |
| 32 | + |
| 33 | + fn iter(&self) -> impl Iterator<Item = &Self::R> { |
| 34 | + std::iter::once(&self.the_region) |
| 35 | + } |
| 36 | + |
| 37 | + fn try_access<F>( |
| 38 | + &self, |
| 39 | + count: usize, |
| 40 | + addr: GuestAddress, |
| 41 | + mut f: F, |
| 42 | + ) -> vm_memory::guest_memory::Result<usize> |
| 43 | + where |
| 44 | + F: FnMut( |
| 45 | + usize, |
| 46 | + usize, |
| 47 | + MemoryRegionAddress, |
| 48 | + &Self::R, |
| 49 | + ) -> vm_memory::guest_memory::Result<usize>, |
| 50 | + { |
| 51 | +        // We only have a single region, meaning the complications of the default |
| 52 | +        // try_access implementation for dealing with reads/writes across multiple |
| 53 | +        // regions do not apply. |
| 54 | + let region_addr = self |
| 55 | + .the_region |
| 56 | + .to_region_addr(addr) |
| 57 | + .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?; |
| 58 | + self.the_region |
| 59 | + .checked_offset(region_addr, count) |
| 60 | + .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?; |
| 61 | + f(0, count, region_addr, &self.the_region) |
| 62 | + } |
| 63 | +} |
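The doc comment above claims that dropping the region lookup removes the only loop from the proofs. As a rough illustration (the helper below is not part of the patch), any access that goes through the blanket `Bytes` implementation vm-memory provides for `GuestMemory` types funnels into the single `try_access` call above exactly once:

```rust
use vm_memory::{Bytes, GuestAddress};

// Illustrative only: both accesses below resolve to one invocation of the
// `try_access` closure, since there is only one region to consider.
fn touch_guest_memory(mem: &SingleRegionGuestMemory) {
    mem.write_obj(0xABu8, GuestAddress(0)).unwrap();
    let _byte: u8 = mem.read_obj(GuestAddress(0)).unwrap();
}
```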
| 64 | + |
| 65 | +impl kani::Arbitrary for SingleRegionGuestMemory { |
| 66 | + fn any() -> Self { |
| 67 | + guest_memory( |
| 68 | + ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(), |
| 69 | + ) |
| 70 | + } |
| 71 | +} |
| 72 | +pub struct ProofContext { |
| 73 | + pub queue: Queue, |
| 74 | + pub memory: SingleRegionGuestMemory, |
| 75 | +} |
| 76 | + |
| 77 | +pub struct MmapRegionStub { |
| 78 | + _addr: *mut u8, |
| 79 | + _size: usize, |
| 80 | + _bitmap: (), |
| 81 | + _file_offset: Option<FileOffset>, |
| 82 | + _prot: i32, |
| 83 | + _flags: i32, |
| 84 | + _owned: bool, |
| 85 | + _hugetlbfs: Option<bool>, |
| 86 | +} |
| 87 | + |
| 88 | +/// We start the first guest memory region at an offset so that harnesses using |
| 89 | +/// Queue::any() will be exposed to queue segments both before and after valid guest memory. |
| 90 | +/// This conforms to MockSplitQueue::new(), which uses `0` as the starting address of the |
| 91 | +/// virtqueue. Also, QUEUE_END is the size of the queue area only if GUEST_MEMORY_BASE is `0`. |
| 92 | +const GUEST_MEMORY_BASE: u64 = 0; |
| 93 | + |
| 94 | +// We size our guest memory to fit a properly aligned queue, plus some wiggle room |
| 95 | +// to make sure we do not only test queues where all segments are laid out consecutively. |
| 96 | +// We need to give at least 16 bytes of buffer space for the descriptor table to be |
| 97 | +// able to change its address, as it is 16-byte aligned. |
| 98 | +const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30; |
| 99 | + |
| 100 | +fn guest_memory(memory: *mut u8) -> SingleRegionGuestMemory { |
| 101 | + // Ideally, we'd want to do |
| 102 | + // let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE) |
| 103 | + // .with_raw_mmap_pointer(bytes.as_mut_ptr()) |
| 104 | + // .build() |
| 105 | + // .unwrap()}; |
| 106 | + // However, .build() calls to .build_raw(), which contains a call to libc::sysconf. |
| 107 | + // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the rust |
| 108 | + // standard library using a special version of the libc crate, it runs into some problems |
| 109 | + // [1] Even if we work around those problems, we run into performance problems [2]. |
| 110 | + // Therefore, for now we stick to this ugly transmute hack (which only works because |
| 111 | + // the kani compiler will never re-order fields, so we can treat repr(Rust) as repr(C)). |
| 112 | + // |
| 113 | + // [1]: https://github.com/model-checking/kani/issues/2673 |
| 114 | + // [2]: https://github.com/model-checking/kani/issues/2538 |
| 115 | + let region_stub = MmapRegionStub { |
| 116 | + _addr: memory, |
| 117 | + _size: GUEST_MEMORY_SIZE, |
| 118 | + _bitmap: Default::default(), |
| 119 | + _file_offset: None, |
| 120 | + _prot: 0, |
| 121 | + _flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE, |
| 122 | + _owned: false, |
| 123 | + _hugetlbfs: None, |
| 124 | + }; |
| 125 | + |
| 126 | + let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) }; |
| 127 | + |
| 128 | + let guest_region = |
| 129 | + vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap(); |
| 130 | + |
| 131 | + // Use a single memory region for guests of size < 2GB |
| 132 | + SingleRegionGuestMemory { |
| 133 | + the_region: guest_region, |
| 134 | + } |
| 135 | +} |
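The transmute above is only sound if `MmapRegionStub` matches the layout of `MmapRegion<()>` under the kani compiler, as the comment notes. A compile-time size check along the following lines could serve as an extra guard; this is just a sketch and not part of the patch (and `transmute` itself already rejects size mismatches at compile time):

```rust
// Illustrative sanity check: the stub must be layout-compatible with the
// real MmapRegion for the transmute in `guest_memory` to be meaningful.
const _: () = assert!(
    std::mem::size_of::<MmapRegionStub>() == std::mem::size_of::<MmapRegion<()>>()
);
```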
| 136 | + |
| 137 | +const MAX_QUEUE_SIZE: u16 = 4; |
| 138 | + |
| 139 | +// Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting |
| 140 | +// at the beginning of guest memory. These are based on Section 2.7 of the VirtIO 1.2 |
| 141 | +// specification. |
| 142 | +const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE; |
| 143 | + |
| 144 | +/// The descriptor table has 16 bytes per entry; the avail ring starts right after. |
| 145 | +const MAX_START_AVAIL_RING_BASE_ADDRESS: u64 = QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16; |
| 146 | + |
| 147 | +/// The used ring starts after the avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE), |
| 148 | +/// plus 2 bytes of padding. |
| 149 | +const MAX_START_USED_RING_BASE_ADDRESS: u64 = |
| 150 | + MAX_START_AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2; |
| 151 | + |
| 152 | +/// The address of the first byte after the queue. Since our queue starts at guest physical |
| 153 | +/// address 0, this is also the size of the memory area occupied by the queue. |
| 154 | +/// Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE |
| 155 | +const QUEUE_END: u64 = MAX_START_USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64; |
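For concreteness, with MAX_QUEUE_SIZE = 4 the constants above work out to a 64-byte descriptor table, a 14-byte avail ring plus 2 bytes of padding, and a 38-byte used ring, so QUEUE_END = 118 and GUEST_MEMORY_SIZE = 148. A compile-time check such as the following (illustrative, not part of the patch) pins down that arithmetic:

```rust
// Worked out for MAX_QUEUE_SIZE = 4:
//   descriptor table: 4 * 16        = 64 -> avail ring starts at most at 64
//   avail ring:       6 + 2 * 4 + 2 = 16 -> used ring starts at most at 80
//   used ring:        6 + 8 * 4     = 38 -> QUEUE_END = 118, guest memory = 148
const _: () = assert!(
    MAX_START_AVAIL_RING_BASE_ADDRESS == 64
        && MAX_START_USED_RING_BASE_ADDRESS == 80
        && QUEUE_END == 118
        && GUEST_MEMORY_SIZE == 148
);
```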
| 156 | + |
| 157 | +impl kani::Arbitrary for ProofContext { |
| 158 | + fn any() -> Self { |
| 159 | +        // The descriptor table has 16 bytes per entry; the avail ring starts right after. |
| 160 | + let desc_tbl_queue_size = kani::any::<u16>(); |
| 161 | + // Alignment of the descriptor table is 16 bytes as per the VirtIO spec. |
| 162 | + // See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf |
| 163 | + kani::assume( |
| 164 | + desc_tbl_queue_size <= 16 * MAX_QUEUE_SIZE && (desc_tbl_queue_size & 0xF == 0), |
| 165 | + ); |
| 166 | + let avail_ring_base_address: u64 = QUEUE_BASE_ADDRESS + desc_tbl_queue_size as u64; |
| 167 | + |
| 168 | + // Used ring starts after avail ring (which has max size 6 + 2 * MAX_QUEUE_SIZE), |
| 169 | + // and needs 2 bytes of padding |
| 170 | + let avail_ring_queue_size = kani::any::<u16>(); |
| 171 | + // Alignment of the available ring is 2 bytes as per the VirtIO spec. |
| 172 | + // See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf |
| 173 | + kani::assume( |
| 174 | + avail_ring_queue_size <= 6 + 2 * MAX_QUEUE_SIZE + 2 |
| 175 | + && (avail_ring_queue_size & 0x1 == 0), |
| 176 | + ); |
| 177 | + let used_ring_base_address: u64 = avail_ring_base_address + avail_ring_queue_size as u64; |
| 178 | + |
| 179 | + // The address of the first byte after the queue. Since our queue starts at guest physical |
| 180 | + // address 0, this is also the size of the memory area occupied by the queue. |
| 181 | + // Note that the used ring structure has max size 6 + 8 * MAX_QUEUE_SIZE |
| 182 | + let used_ring_queue_size = kani::any::<u16>(); |
| 183 | + // Alignment of the used ring is 4 bytes as per the VirtIO spec. |
| 184 | + // See https://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.pdf |
| 185 | + kani::assume( |
| 186 | + avail_ring_queue_size <= 6 + 8 * MAX_QUEUE_SIZE && (used_ring_queue_size & 0x3 == 0), |
| 187 | + ); |
| 188 | + |
| 189 | + // The size of the queue data structures should fill the available space |
| 190 | + kani::assume(QUEUE_END == used_ring_base_address + used_ring_queue_size as u64); |
| 191 | + |
| 192 | + let mem = SingleRegionGuestMemory::any(); |
| 193 | + |
| 194 | + let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap(); |
| 195 | + |
| 196 | + queue.ready = true; |
| 197 | + |
| 198 | + queue.set_desc_table_address( |
| 199 | + Some(QUEUE_BASE_ADDRESS as u32), |
| 200 | + Some((QUEUE_BASE_ADDRESS >> 32) as u32), |
| 201 | + ); |
| 202 | + |
| 203 | + queue.set_avail_ring_address( |
| 204 | + Some(avail_ring_base_address as u32), |
| 205 | + Some((avail_ring_base_address >> 32) as u32), |
| 206 | + ); |
| 207 | + |
| 208 | + queue.set_used_ring_address( |
| 209 | + Some(used_ring_base_address as u32), |
| 210 | + Some((used_ring_base_address >> 32) as u32), |
| 211 | + ); |
| 212 | + |
| 213 | + queue.set_next_avail(kani::any()); |
| 214 | + queue.set_next_used(kani::any()); |
| 215 | + queue.set_event_idx(kani::any()); |
| 216 | + queue.num_added = Wrapping(kani::any()); |
| 217 | + |
| 218 | + kani::assume(queue.is_valid(&mem)); |
| 219 | + |
| 220 | + ProofContext { queue, memory: mem } |
| 221 | + } |
| 222 | +} |
| 223 | + |
| 224 | +#[kani::proof] |
| 225 | +// There are no loops anywhere, but kani really enjoys getting stuck in std::ptr::drop_in_place. |
| 226 | +// This is a compiler intrinsic that has a "dummy" implementation in stdlib that just |
| 227 | +// recursively calls itself. Kani will generally unwind this recursion infinitely. |
| 228 | +#[kani::unwind(0)] |
| 229 | +fn verify_spec_2_7_7_2() { |
| 230 | + // Section 2.7.7.2 deals with device-to-driver notification suppression. |
| 231 | + // It describes a mechanism by which the driver can tell the device that it does not |
| 232 | + // want notifications (IRQs) about the device finishing processing individual buffers |
| 233 | + // (descriptor chain heads) from the avail ring until a specific number of descriptors |
| 234 | + // has been processed. This is done by the driver |
| 235 | + // defining a "used_event" index, which tells the device "please do not notify me until |
| 236 | + // used.ring[used_event] has been written to by you". |
| 237 | + let ProofContext { |
| 238 | + mut queue, |
| 239 | + memory: mem, |
| 240 | + } = kani::any(); |
| 241 | + |
| 242 | + let num_added_old = queue.num_added.0; |
| 243 | + let needs_notification = queue.needs_notification(&mem); |
| 244 | + |
| 245 | +    // event_idx_enabled is equivalent to VIRTIO_F_EVENT_IDX having been negotiated |
| 246 | + if !queue.event_idx_enabled { |
| 247 | +        // The specification here says: |
| 248 | +        // After the device writes a descriptor index into the used ring: |
| 249 | +        // – If flags is 1, the device SHOULD NOT send a notification. |
| 250 | +        // – If flags is 0, the device MUST send a notification. |
| 251 | +        // Here, flags is the first field of the avail ring, which we completely ignore. We |
| 252 | +        // always send a notification, and since the spec only says SHOULD NOT, that is okay. |
| 253 | + assert!(needs_notification.unwrap()); |
| 254 | + } else { |
| 255 | + // next_used - 1 is where the previous descriptor was placed |
| 256 | +        if queue.used_event(&mem, Ordering::Relaxed).unwrap() |
| 257 | +            == queue.next_used - Wrapping(1) |
| 258 | + && num_added_old > 0 |
| 259 | + { |
| 260 | + // If the idx field in the used ring (which determined where that descriptor index |
| 261 | + // was placed) was equal to used_event, the device MUST send a |
| 262 | + // notification. |
| 263 | + assert!(needs_notification.unwrap()); |
| 264 | + |
| 265 | + kani::cover!(); |
| 266 | + } |
| 267 | + |
| 268 | +        // The other case is covered by a "SHOULD NOT send a notification" in the spec, |
| 269 | +        // so we do not check it. |
| 270 | + } |
| 271 | +} |
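When VIRTIO_F_EVENT_IDX is negotiated, the general device-to-driver notification condition behind `needs_notification` is the `vring_need_event` check given in the VirtIO specification (and in Linux's virtio_ring.h); the assertion above exercises the special case used_event == next_used - 1 with num_added > 0. A minimal standalone sketch of that check (names are illustrative, not part of the patch):

```rust
use std::num::Wrapping;

// `vring_need_event` from the VirtIO spec's reference C code: notify iff
// used_event lies in the window of entries written since the last
// notification, using wrapping u16 arithmetic throughout.
fn vring_need_event(event_idx: Wrapping<u16>, new_idx: Wrapping<u16>, old_idx: Wrapping<u16>) -> bool {
    new_idx - event_idx - Wrapping(1) < new_idx - old_idx
}

// The case asserted by the proof: used_event == next_used - 1 and at least
// one descriptor written since the last notification implies a notification.
fn _example() {
    let next_used = Wrapping(5u16);
    let old = next_used - Wrapping(1); // i.e. num_added == 1
    assert!(vring_need_event(next_used - Wrapping(1), next_used, old));
}
```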