virtq: specify endianness #1141

Merged · 1 commit · Apr 23, 2024
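
This PR makes the guest-visible virtqueue structures explicitly little-endian. Virtio is a little-endian protocol: the device reads the descriptor table and the rings in little-endian byte order regardless of the guest CPU's endianness. Replacing the native u16/u32/u64 fields with zerocopy's byte-order-aware types means the in-memory representation is always the wire format, and any access that forgets to convert no longer compiles. A minimal sketch of how these wrapper types behave (the struct shape mirrors the packed-ring descriptor changed below):

    use zerocopy::little_endian;

    // Mirrors the packed-virtqueue descriptor from this PR.
    #[repr(C, align(16))]
    struct Descriptor {
        address: little_endian::U64,
        len: little_endian::U32,
        buff_id: little_endian::U16,
        flags: little_endian::U16,
    }

    fn example() {
        let mut desc = Descriptor {
            address: little_endian::U64::new(0x1000), // stored as LE bytes
            len: 64u32.into(),                        // From<u32> converts as well
            buff_id: 1u16.into(),
            flags: 0u16.into(),
        };
        desc.address.set(0x2000);      // in-place store, native -> LE
        let len: u32 = desc.len.get(); // load, LE -> native
        assert_eq!(len, 64);
    }
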
99 changes: 40 additions & 59 deletions src/drivers/virtio/virtqueue/packed.rs
@@ -11,6 +11,7 @@ use core::ptr;
use core::sync::atomic::{fence, Ordering};

use align_address::Align;
+ use zerocopy::little_endian;

use super::super::features::Features;
#[cfg(not(feature = "pci"))]
@@ -281,7 +282,7 @@ impl DescriptorRing
// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
// See Virtio specification v1.1. - 2.7.21
fence(Ordering::SeqCst);
- self.ring[first_ctrl_settings.0].flags |= first_ctrl_settings.2.as_flags_avail();
+ self.ring[first_ctrl_settings.0].flags |= first_ctrl_settings.2.as_flags_avail().into();

// Converting a boolean as u8 is fine
(first_ctrl_settings.0, first_ctrl_settings.2 .0 as u8)
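
The wrap-counter helpers (as_flags_avail, as_flags_used) still return a native u16, so the new code lifts the value into a little_endian::U16 with .into() before OR-ing it into the flags field; zerocopy implements the bitwise operators between matching byte-order types. A sketch of the same pattern (flag value taken from the virtio v1.1 packed-ring spec; the variable names are illustrative):

    use zerocopy::little_endian;

    fn flag_example() {
        let mut flags = little_endian::U16::new(0);
        let avail: u16 = 1 << 7; // VIRTQ_DESC_F_AVAIL
        flags |= little_endian::U16::from(avail); // convert, then OR into the LE field
        assert_eq!(flags.get() & avail, avail);
    }
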
@@ -448,7 +449,7 @@ impl<'a> ReadCtrl<'a> {
/// updating the queue and returns the respective TransferToken.
fn poll_next(&mut self) -> Option<Box<TransferToken>> {
// Check if descriptor has been marked used.
- if self.desc_ring.ring[self.position].flags & WrapCount::flag_mask()
+ if self.desc_ring.ring[self.position].flags.get() & WrapCount::flag_mask()
== self.desc_ring.dev_wc.as_flags_used()
{
let buff_id = usize::from(self.desc_ring.ring[self.position].buff_id);
@@ -489,10 +490,10 @@ impl<'a> ReadCtrl<'a> {
// Need to only check for either send or receive buff to contain
// a ctrl_desc as, both carry the same if they carry one.
if send_buff.is_indirect() {
- self.update_indirect(Some(send_buff), Some((recv_buff, write_len)));
+ self.update_indirect(Some(send_buff), Some((recv_buff, write_len.into())));
} else {
self.update_send(send_buff);
- self.update_recv((recv_buff, write_len));
+ self.update_recv((recv_buff, write_len.into()));
}
}
(Some(send_buff), None) => {
@@ -504,9 +505,9 @@
}
(None, Some(recv_buff)) => {
if recv_buff.is_indirect() {
- self.update_indirect(None, Some((recv_buff, write_len)));
+ self.update_indirect(None, Some((recv_buff, write_len.into())));
} else {
- self.update_recv((recv_buff, write_len));
+ self.update_recv((recv_buff, write_len.into()));
}
}
(None, None) => unreachable!("Empty Transfers are not allowed..."),
@@ -555,13 +556,13 @@ impl<'a> ReadCtrl<'a> {
// Unwrapping is fine here, as lists must be of same size and same ordering
let ring_desc = desc_iter.next().unwrap();

- if write_len >= ring_desc.len {
+ if write_len >= ring_desc.len.into() {
// Complete length has been written but reduce len_written for next one
- write_len -= ring_desc.len;
+ write_len -= ring_desc.len.get();
} else {
- ring_desc.len = write_len;
+ ring_desc.len = (write_len).into();
desc.len = write_len as usize;
- write_len -= ring_desc.len;
+ write_len -= ring_desc.len.get();
assert_eq!(write_len, 0);
}
}
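
The loop above walks the single total length reported by the device across the descriptors of a chain; with the little-endian types, every comparison and subtraction now goes through .get()/.into(). A self-contained sketch of that redistribution logic, using plain u32 lengths and a hypothetical helper name for clarity:

    /// Distributes the device-reported written length over a chain's
    /// descriptor lengths, front to back (simplified from the loop above).
    fn distribute_write_len(mut write_len: u32, lens: &mut [u32]) {
        for len in lens.iter_mut() {
            if write_len >= *len {
                // Descriptor fully written; carry the remainder onward.
                write_len -= *len;
            } else {
                // Last, partially written descriptor keeps the remainder.
                *len = write_len;
                write_len = 0;
            }
        }
        assert_eq!(write_len, 0);
    }

    // distribute_write_len(100, &mut [64, 64]) leaves the lengths as [64, 36].
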
@@ -607,13 +608,13 @@ impl<'a> ReadCtrl<'a> {
// Unwrapping is fine here, as lists must be of same size and same ordering
let ring_desc = desc_iter.next().unwrap();

- if write_len >= ring_desc.len {
+ if write_len >= ring_desc.len.into() {
// Complete length has been written but reduce len_written for next one
- write_len -= ring_desc.len;
+ write_len -= ring_desc.len.get();
} else {
- ring_desc.len = write_len;
+ ring_desc.len = write_len.into();
desc.len = write_len as usize;
- write_len -= ring_desc.len;
+ write_len -= ring_desc.len.get();
assert_eq!(write_len, 0);
}
}
@@ -634,7 +635,7 @@ impl<'a> ReadCtrl<'a> {
// self.desc_ring.ring[self.position].address = 0;
// self.desc_ring.ring[self.position].len = 0;
// self.desc_ring.ring[self.position].buff_id = 0;
- self.desc_ring.ring[self.position].flags = self.desc_ring.dev_wc.as_flags_used();
+ self.desc_ring.ring[self.position].flags = self.desc_ring.dev_wc.as_flags_used().into();
}

/// Updates the accessible len of the memory areas accessible by the drivers to be consistent with
@@ -744,25 +745,31 @@ impl<'a> WriteCtrl<'a> {
// descriptor.
if self.start == self.position {
let desc_ref = &mut self.desc_ring.ring[self.position];
- desc_ref.address = paging::virt_to_phys(VirtAddr::from(mem_desc.ptr as u64)).into();
- desc_ref.len = mem_desc.len as u32;
- desc_ref.buff_id = mem_desc.id.as_ref().unwrap().0;
+ desc_ref
+     .address
+     .set(paging::virt_to_phys(VirtAddr::from(mem_desc.ptr as u64)).into());
+ desc_ref.len = (mem_desc.len as u32).into();
+ desc_ref.buff_id = (mem_desc.id.as_ref().unwrap().0).into();
// Remove possibly set avail and used flags
desc_ref.flags =
- flags & !(DescrFlags::VIRTQ_DESC_F_AVAIL) & !(DescrFlags::VIRTQ_DESC_F_USED);
+ (flags & !(DescrFlags::VIRTQ_DESC_F_AVAIL) & !(DescrFlags::VIRTQ_DESC_F_USED))
+     .into();

self.buff_id = mem_desc.id.as_ref().unwrap().0;
self.incrmt();
} else {
let desc_ref = &mut self.desc_ring.ring[self.position];
- desc_ref.address = paging::virt_to_phys(VirtAddr::from(mem_desc.ptr as u64)).into();
- desc_ref.len = mem_desc.len as u32;
- desc_ref.buff_id = self.buff_id;
+ desc_ref
+     .address
+     .set(paging::virt_to_phys(VirtAddr::from(mem_desc.ptr as u64)).into());
+ desc_ref.len = (mem_desc.len as u32).into();
+ desc_ref.buff_id = (self.buff_id).into();
// Remove possibly set avail and used flags and then set avail and used
// according to the current WrapCount.
desc_ref.flags =
- (flags & !(DescrFlags::VIRTQ_DESC_F_AVAIL) & !(DescrFlags::VIRTQ_DESC_F_USED))
- | self.desc_ring.drv_wc.as_flags_avail();
+ ((flags & !(DescrFlags::VIRTQ_DESC_F_AVAIL) & !(DescrFlags::VIRTQ_DESC_F_USED))
+     | self.desc_ring.drv_wc.as_flags_avail())
+     .into();

self.incrmt()
}
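
Note the two store styles in this hunk: address is updated in place with .set(...), while len and buff_id are assigned freshly converted values via .into(). Both end up as the same little-endian store; the choice is stylistic. A short sketch:

    use zerocopy::little_endian;

    fn store_example() {
        let mut len = little_endian::U32::new(0);
        len.set(4096);        // in-place store of a native value
        len = 4096u32.into(); // equivalent: convert, then assign
        assert_eq!(len.get(), 4096);
    }
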
@@ -779,54 +786,28 @@ impl<'a> WriteCtrl<'a> {
// The driver performs a suitable memory barrier to ensure the device sees the updated descriptor table and available ring before the next step.
// See Virtio specification v1.1. - 2.7.21
fence(Ordering::SeqCst);
- self.desc_ring.ring[self.start].flags |= self.wrap_at_init.as_flags_avail();
+ self.desc_ring.ring[self.start].flags |= self.wrap_at_init.as_flags_avail().into();
}
}

#[derive(Clone, Copy)]
#[repr(C, align(16))]
struct Descriptor {
- address: u64,
- len: u32,
- buff_id: u16,
- flags: u16,
+ address: little_endian::U64,
+ len: little_endian::U32,
+ buff_id: little_endian::U16,
+ flags: little_endian::U16,
}

impl Descriptor {
fn new(add: u64, len: u32, id: u16, flags: u16) -> Self {
Descriptor {
- address: add,
- len,
- buff_id: id,
- flags,
+ address: add.into(),
+ len: len.into(),
+ buff_id: id.into(),
+ flags: flags.into(),
}
}

- fn to_le_bytes(self) -> [u8; 16] {
-     // 128 bits long raw descriptor bytes
-     let mut desc_bytes: [u8; 16] = [0; 16];
-
-     // Call to little endian, as device will read this and
-     // Virtio devices are inherently little endian coded.
-     let mem_addr: [u8; 8] = self.address.to_le_bytes();
-     desc_bytes[0..8].copy_from_slice(&mem_addr);
-
-     // Must be 32 bit in order to fulfill specification.
-     // MemPool.pull and .pull_untracked ensure this automatically
-     // which makes this cast safe.
-     let mem_len: [u8; 4] = self.len.to_le_bytes();
-     desc_bytes[8..12].copy_from_slice(&mem_len);
-
-     // Write BuffID as bytes in raw.
-     let id: [u8; 2] = self.buff_id.to_le_bytes();
-     desc_bytes[12..14].copy_from_slice(&id);
-
-     // Write flags as bytes in raw.
-     let flags: [u8; 2] = self.flags.to_le_bytes();
-     desc_bytes[14..16].copy_from_slice(&flags);
-
-     desc_bytes
- }
}
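
The hand-rolled to_le_bytes serializer above becomes dead code with this change: every field is now stored little-endian in memory, and the #[repr(C)] layout has no padding (8 + 4 + 2 + 2 = 16 bytes), so the struct's raw bytes already are the wire format. A hypothetical sketch of that observation using zerocopy's AsBytes derive (named IntoBytes in zerocopy 0.8+); the derive is an assumption for illustration, not part of this PR:

    use zerocopy::{little_endian, AsBytes};

    #[derive(AsBytes)]
    #[repr(C)] // no padding: 8 + 4 + 2 + 2 = 16 bytes
    struct Descriptor {
        address: little_endian::U64,
        len: little_endian::U32,
        buff_id: little_endian::U16,
        flags: little_endian::U16,
    }

    // The same 16 bytes the removed to_le_bytes assembled by hand.
    fn wire_bytes(desc: &Descriptor) -> &[u8] {
        desc.as_bytes()
    }
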

/// Driver and device event suppression struct used in packed virtqueues.
67 changes: 34 additions & 33 deletions src/drivers/virtio/virtqueue/split.rs
@@ -10,6 +10,7 @@ use core::cell::RefCell;
use core::ptr;

use align_address::Align;
+ use zerocopy::little_endian;

#[cfg(not(feature = "pci"))]
use super::super::transport::mmio::{ComCfg, NotifCfg, NotifCtrl};
@@ -27,19 +28,19 @@ use crate::arch::mm::{paging, VirtAddr};
#[repr(C)]
#[derive(Copy, Clone)]
struct Descriptor {
- address: u64,
- len: u32,
- flags: u16,
- next: u16,
+ address: little_endian::U64,
+ len: little_endian::U32,
+ flags: little_endian::U16,
+ next: little_endian::U16,
}

impl Descriptor {
fn new(addr: u64, len: u32, flags: u16, next: u16) -> Self {
Descriptor {
- address: addr,
- len,
- flags,
- next,
+ address: addr.into(),
+ len: len.into(),
+ flags: flags.into(),
+ next: next.into(),
}
}
}
@@ -49,24 +50,24 @@ struct DescrTable {
}

struct AvailRing {
- flags: &'static mut u16,
- index: &'static mut u16,
- ring: &'static mut [u16],
- event: &'static mut u16,
+ flags: &'static mut little_endian::U16,
+ index: &'static mut little_endian::U16,
+ ring: &'static mut [little_endian::U16],
+ event: &'static mut little_endian::U16,
}

struct UsedRing {
- flags: &'static mut u16,
- index: *mut u16,
+ flags: &'static mut little_endian::U16,
+ index: *mut little_endian::U16,
ring: &'static mut [UsedElem],
- event: &'static mut u16,
+ event: &'static mut little_endian::U16,
}

#[repr(C)]
#[derive(Copy, Clone)]
struct UsedElem {
- id: u32,
- len: u32,
+ id: little_endian::U32,
+ len: little_endian::U32,
}

struct DescrRing {
@@ -188,29 +189,29 @@ impl DescrRing {
}

self.ref_ring[index] = Some(Box::new(tkn));
- self.avail_ring.ring[*self.avail_ring.index as usize % self.avail_ring.ring.len()] =
-     index as u16;
+ self.avail_ring.ring[self.avail_ring.index.get() as usize % self.avail_ring.ring.len()] =
+     (index as u16).into();

memory_barrier();
- *self.avail_ring.index = self.avail_ring.index.wrapping_add(1);
+ *self.avail_ring.index = (self.avail_ring.index.get().wrapping_add(1)).into();

(0, 0)
}
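
With the available ring's index behind a little_endian::U16, the increment becomes an explicit read-modify-write: fetch the native value with .get(), do the arithmetic, and convert back with .into(). A sketch of the pattern (values illustrative):

    use zerocopy::little_endian;

    fn increment_example() {
        let mut index = little_endian::U16::new(u16::MAX);
        index = index.get().wrapping_add(1).into(); // native math, LE store
        assert_eq!(index.get(), 0); // wraps exactly like the old bare u16
    }
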

fn poll(&mut self) {
- while self.read_idx != unsafe { ptr::read_volatile(self.used_ring.index) } {
+ while self.read_idx != unsafe { ptr::read_volatile(self.used_ring.index).get() } {
let cur_ring_index = self.read_idx as usize % self.used_ring.ring.len();
let used_elem = unsafe { ptr::read_volatile(&self.used_ring.ring[cur_ring_index]) };

- let mut tkn = self.ref_ring[used_elem.id as usize].take().expect(
+ let mut tkn = self.ref_ring[used_elem.id.get() as usize].take().expect(
"The buff_id is incorrect or the reference to the TransferToken was misplaced.",
);

if tkn.buff_tkn.as_ref().unwrap().recv_buff.as_ref().is_some() {
tkn.buff_tkn
.as_mut()
.unwrap()
- .restr_size(None, Some(used_elem.len as usize))
+ .restr_size(None, Some(used_elem.len.get() as usize))
.unwrap();
}
tkn.state = TransferState::Finished;
Expand All @@ -225,15 +226,15 @@ impl DescrRing {
}

fn drv_enable_notif(&mut self) {
- *self.avail_ring.flags = 0;
+ *self.avail_ring.flags = 0.into();
}

fn drv_disable_notif(&mut self) {
- *self.avail_ring.flags = 1;
+ *self.avail_ring.flags = 1.into();
}

fn dev_is_notif(&self) -> bool {
- *self.used_ring.flags & 1 == 0
+ *self.used_ring.flags & 1.into() == little_endian::U16::new(0)
}
}
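
In dev_is_notif, both operands of == are now little_endian::U16; equality on the wrapper compares the underlying bytes, which is byte-order-agnostic and therefore equivalent to comparing the native values. An alternative formulation that reads the native value first (a sketch, not the PR's code):

    use zerocopy::little_endian;

    fn dev_is_notif(flags: little_endian::U16) -> bool {
        // Bit 0 of the used ring's flags field is VIRTQ_USED_F_NO_NOTIFY.
        flags.get() & 1 == 0
    }
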

@@ -340,13 +341,13 @@ impl Virtq for SplitVq {

let avail_ring = unsafe {
AvailRing {
- flags: &mut *(avail_raw as *mut u16),
- index: &mut *(avail_raw.offset(2) as *mut u16),
+ flags: &mut *(avail_raw as *mut little_endian::U16),
+ index: &mut *(avail_raw.offset(2) as *mut little_endian::U16),
ring: core::slice::from_raw_parts_mut(
- avail_raw.offset(4) as *mut u16,
+ avail_raw.offset(4) as *mut little_endian::U16,
size as usize,
),
- event: &mut *(avail_raw.offset(4 + 2 * (size as isize)) as *mut u16),
+ event: &mut *(avail_raw.offset(4 + 2 * (size as isize)) as *mut little_endian::U16),
}
};

@@ -358,13 +359,13 @@

let used_ring = unsafe {
UsedRing {
- flags: &mut *(used_raw as *mut u16),
- index: used_raw.offset(2) as *mut u16,
+ flags: &mut *(used_raw as *mut little_endian::U16),
+ index: used_raw.offset(2) as *mut little_endian::U16,
ring: core::slice::from_raw_parts_mut(
used_raw.offset(4) as *mut UsedElem,
size as usize,
),
- event: &mut *(used_raw.offset(4 + 8 * (size as isize)) as *mut u16),
+ event: &mut *(used_raw.offset(4 + 8 * (size as isize)) as *mut little_endian::U16),
}
};

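
A final note on why the raw-pointer casts in these constructors remain sound: zerocopy's byte-order integers are transparent wrappers around byte arrays, so little_endian::U16 has the same size as u16 but an alignment of 1, making the casts at least as permissive as the old casts to *mut u16. A quick layout check (sketch):

    use core::mem::{align_of, size_of};
    use zerocopy::little_endian;

    fn layout_check() {
        assert_eq!(size_of::<little_endian::U16>(), 2);
        assert_eq!(align_of::<little_endian::U16>(), 1); // wrapper over [u8; 2]
    }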