clean up some disk table stuff
ClementTsang committed Feb 3, 2025
1 parent fb956e2 commit 8aa3dd1
Showing 8 changed files with 78 additions and 109 deletions.
6 changes: 1 addition & 5 deletions src/app.rs
@@ -156,28 +156,24 @@ impl App {
for proc in self.states.proc_state.widget_states.values_mut() {
if proc.force_update_data {
proc.set_table_data(data_source);
proc.force_update_data = false;
}
}

for temp in self.states.temp_state.widget_states.values_mut() {
if temp.force_update_data {
temp.set_table_data(&data_source.temp_data);
temp.force_update_data = false;
}
}

for cpu in self.states.cpu_state.widget_states.values_mut() {
if cpu.force_update_data {
cpu.set_legend_data(&data_source.cpu_harvest);
cpu.force_update_data = false;
}
}

for disk in self.states.disk_state.widget_states.values_mut() {
if disk.force_update_data {
disk.set_table_data(data_source); // FIXME: (points_rework_v1) do more work when eating data, not in set table data; maybe separate PR
disk.force_update_data = false;
disk.set_table_data(data_source);
}
}
}
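The pattern across this commit: each table widget now clears its own `force_update_data` flag inside its `set_table_data` (or `set_legend_data`) method, so call sites like the loops above only check the flag and call the setter. A minimal sketch of that shape, with hypothetical, simplified types standing in for the real widget structs:

```rust
// Hypothetical, trimmed-down stand-in for a table widget; the real widgets
// (ProcWidgetState, TempWidgetState, CpuWidgetState, DiskTableWidget) carry
// far more state than this.
struct TableWidget {
    force_update_data: bool,
    rows: Vec<String>,
}

impl TableWidget {
    /// Rebuild the table rows and clear the dirty flag in one place,
    /// instead of resetting the flag at every call site.
    fn set_table_data(&mut self, new_rows: &[String]) {
        self.rows = new_rows.to_vec();
        self.force_update_data = false;
    }
}

fn refresh_all(widgets: &mut [TableWidget], rows: &[String]) {
    for widget in widgets.iter_mut() {
        if widget.force_update_data {
            // No trailing `widget.force_update_data = false;` needed here anymore.
            widget.set_table_data(rows);
        }
    }
}
```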
147 changes: 69 additions & 78 deletions src/app/data/store.rs
@@ -10,7 +10,7 @@ use crate::{
collection::{cpu, disks, memory::MemHarvest, network, Data},
dec_bytes_per_second_string,
utils::data_units::DataUnit,
widgets::TempWidgetData,
widgets::{DiskWidgetData, TempWidgetData},
};

use super::{ProcessData, TimeSeriesData};
@@ -34,10 +34,9 @@ pub struct StoredData {
pub cpu_harvest: cpu::CpuHarvest,
pub load_avg_harvest: cpu::LoadAvgHarvest,
pub process_data: ProcessData,
pub disk_harvest: Vec<disks::DiskHarvest>,
// TODO: The IO labels are kinda weird.
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub io_labels: Vec<(String, String)>,
/// TODO: (points_rework_v1) Might be a better way to do this without having to store here?
pub prev_io: Vec<(u64, u64)>,
pub disk_harvest: Vec<DiskWidgetData>,
pub temp_data: Vec<TempWidgetData>,
#[cfg(feature = "battery")]
pub battery_harvest: Vec<batteries::BatteryData>,
@@ -56,9 +55,8 @@ impl Default for StoredData {
cpu_harvest: cpu::CpuHarvest::default(),
load_avg_harvest: cpu::LoadAvgHarvest::default(),
process_data: Default::default(),
prev_io: Vec::default(),
disk_harvest: Vec::default(),
io_labels_and_prev: Vec::default(),
io_labels: Vec::default(),
temp_data: Vec::default(),
#[cfg(feature = "battery")]
battery_harvest: Vec::default(),
@@ -125,7 +123,6 @@ impl StoredData {
self.load_avg_harvest = load_avg;
}

// TODO: (points_rework_v1) the map might be redundant, the types are the same.
self.temp_data = data
.temperature_sensors
.map(|sensors| {
Expand Down Expand Up @@ -169,8 +166,14 @@ impl StoredData {
.duration_since(self.last_update_time)
.as_secs_f64();

for (itx, device) in disks.iter().enumerate() {
let checked_name = {
self.disk_harvest.clear();

let prev_io_diff = disks.len().saturating_sub(self.prev_io.len());
self.prev_io.reserve(prev_io_diff);
self.prev_io.extend((0..prev_io_diff).map(|_| (0, 0)));

for (itx, device) in disks.into_iter().enumerate() {
let Some(checked_name) = ({
#[cfg(target_os = "windows")]
{
match &device.volume_name {
@@ -194,85 +197,73 @@
device.name.split('/').last()
}
}
}) else {
continue;
};

if let Some(checked_name) = checked_name {
let io_device = {
#[cfg(target_os = "macos")]
{
use std::sync::OnceLock;

use regex::Regex;

// Must trim one level further for macOS!
static DISK_REGEX: OnceLock<Regex> = OnceLock::new();

#[expect(
clippy::regex_creation_in_loops,
reason = "this is fine since it's done via a static OnceLock. In the future though, separate it out."
)]
if let Some(new_name) = DISK_REGEX
.get_or_init(|| Regex::new(r"disk\d+").unwrap())
.find(checked_name)
{
io.get(new_name.as_str())
} else {
None
}
}
#[cfg(not(target_os = "macos"))]
{
io.get(checked_name)
}
};
let io_device = {
#[cfg(target_os = "macos")]
{
use std::sync::OnceLock;

if let Some(io_device) = io_device {
let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
(io.read_bytes, io.write_bytes)
} else {
(0, 0)
};
use regex::Regex;

if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
// Must trim one level further for macOS!
static DISK_REGEX: OnceLock<Regex> = OnceLock::new();

if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
#[expect(
clippy::regex_creation_in_loops,
reason = "this is fine since it's done via a static OnceLock. In the future though, separate it out."
)]
if let Some(new_name) = DISK_REGEX
.get_or_init(|| Regex::new(r"disk\d+").unwrap())
.find(checked_name)
{
io.get(new_name.as_str())
} else {
None
}
}
#[cfg(not(target_os = "macos"))]
{
io.get(checked_name)
}
};

if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
/ time_since_last_harvest)
.round() as u64;

*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);

// TODO: idk why I'm generating this here tbh
if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = (
dec_bytes_per_second_string(r_rate),
dec_bytes_per_second_string(w_rate),
);
}
}
} else {
if self.io_labels.len() <= itx {
self.io_labels.push((String::default(), String::default()));
}
let (mut io_read, mut io_write) = ("N/A".to_string(), "N/A".to_string());
if let Some(Some(io_device)) = io_device {
if let Some(prev_io) = self.prev_io.get_mut(itx) {
let r_rate = ((io_device.read_bytes.saturating_sub(prev_io.0)) as f64
/ time_since_last_harvest)
.round() as u64;

if let Some(io_labels) = self.io_labels.get_mut(itx) {
*io_labels = ("N/A".to_string(), "N/A".to_string());
}
let w_rate = ((io_device.write_bytes.saturating_sub(prev_io.1)) as f64
/ time_since_last_harvest)
.round() as u64;

*prev_io = (io_device.read_bytes, io_device.write_bytes);

io_read = dec_bytes_per_second_string(r_rate);
io_write = dec_bytes_per_second_string(w_rate);
}
}
}

self.disk_harvest = disks;
let summed_total_bytes = match (device.used_space, device.free_space) {
(Some(used), Some(free)) => Some(used + free),
_ => None,
};

self.disk_harvest.push(DiskWidgetData {
name: device.name,
mount_point: device.mount_point,
free_bytes: device.free_space,
used_bytes: device.used_space,
total_bytes: device.total_space,
summed_total_bytes,
io_read,
io_write,
});
}
}
}
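The rewritten disk ingestion shown above builds `DiskWidgetData` rows directly while eating data: it grows `prev_io` to one counter pair per disk, diffs each disk's raw read/write byte counters against the previous harvest to get per-second rates, and pushes a finished row, replacing the old `io_labels` / `io_labels_and_prev` bookkeeping. The `checked_name` handling also switches from a nested `if let Some(...)` block to a `let Some(...) = (...) else { continue; }` guard, so disks without a usable name are skipped up front. A minimal sketch of the rate step under simplified, hypothetical types (the real code formats rates with `dec_bytes_per_second_string` and falls back to "N/A" when I/O counters are missing):

```rust
/// Hypothetical stand-alone version of the per-disk I/O rate step sketched
/// from the diff above; `prev_io` holds the raw counters from the previous
/// harvest, one (read_bytes, write_bytes) pair per disk.
fn disk_io_rates(
    current: &[(u64, u64)], // (read_bytes, write_bytes) for each disk this harvest
    prev_io: &mut Vec<(u64, u64)>,
    elapsed_secs: f64,
) -> Vec<(u64, u64)> {
    // Make sure there is one previous-counter slot per disk, defaulting to zero.
    let missing = current.len().saturating_sub(prev_io.len());
    prev_io.reserve(missing);
    prev_io.extend((0..missing).map(|_| (0, 0)));

    current
        .iter()
        .zip(prev_io.iter_mut())
        .map(|(&(read, write), prev)| {
            // Bytes per second since the last harvest; saturating_sub means a
            // counter reset never underflows.
            let r_rate = (read.saturating_sub(prev.0) as f64 / elapsed_secs).round() as u64;
            let w_rate = (write.saturating_sub(prev.1) as f64 / elapsed_secs).round() as u64;

            // Store the raw counters for the next harvest to diff against.
            *prev = (read, write);

            (r_rate, w_rate)
        })
        .collect()
}
```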

4 changes: 3 additions & 1 deletion src/canvas/components/time_graph/time_chart/points.rs
@@ -42,13 +42,15 @@ impl TimeChart<'_> {

// TODO: (points_rework_v1) Can we instead modify the range so it's based on the epoch rather than having to convert?
// TODO: (points_rework_v1) Is this efficient? Or should I prune using take_while first?
// TODO: (points_rework_v1) Should this be generic over dataset.graph_type?
for (curr, next) in values
.iter_along_base(times)
.rev()
.map(|(&time, &val)| {
let from_start: f64 =
(current_time.duration_since(time).as_millis() as f64).floor();

// XXX: Should this be generic over dataset.graph_type instead? That would allow us to move
// transformations behind a type - however, that also means that there's some complexity added.
(-from_start, self.scaling.scale(val))
})
.tuple_windows()
1 change: 0 additions & 1 deletion src/canvas/widgets/mem_basic.rs
@@ -74,7 +74,6 @@ impl Painter {
.gauge_style(self.styles.ram_style),
);

// FIXME: (points_rework_v1) Change all of these to get the last point instead
if let Some(swap_harvest) = &data.swap_harvest {
let swap_percentage = swap_harvest.saturating_percentage();
let swap_label = memory_label(swap_harvest, app_state.basic_mode_use_percent);
1 change: 1 addition & 0 deletions src/widgets/cpu_graph.rs
@@ -178,5 +178,6 @@ impl CpuWidgetState {
.chain(data.iter().map(CpuWidgetTableData::from_cpu_data))
.collect(),
);
self.force_update_data = false;
}
}
26 changes: 2 additions & 24 deletions src/widgets/disk_table.rs
@@ -314,36 +314,14 @@ impl DiskTableWidget {
}

/// Update the current table data.
///
/// TODO: Move the conversion step into the eating part.
pub fn set_table_data(&mut self, data: &StoredData) {
let mut data = data
.disk_harvest
.iter()
.zip(&data.io_labels)
.map(|(disk, (io_read, io_write))| {
let summed_total_bytes = match (disk.used_space, disk.free_space) {
(Some(used), Some(free)) => Some(used + free),
_ => None,
};

DiskWidgetData {
name: disk.name.to_string(),
mount_point: disk.mount_point.to_string(),
free_bytes: disk.free_space,
used_bytes: disk.used_space,
total_bytes: disk.total_space,
summed_total_bytes,
io_read: io_read.to_string(),
io_write: io_write.to_string(),
}
})
.collect::<Vec<_>>();
let mut data = data.disk_harvest.clone();

if let Some(column) = self.table.columns.get(self.table.sort_index()) {
column.sort_by(&mut data, self.table.order());
}
self.table.set_data(data);
self.force_update_data = false;
}

pub fn set_index(&mut self, index: usize) {
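With the row conversion moved into `StoredData`, the widget-side `set_table_data` shrinks to cloning the prebuilt rows, sorting them by the active column, and clearing the dirty flag. A rough sketch under assumed, simplified types (the real code sorts via `column.sort_by(&mut data, self.table.order())` rather than by name):

```rust
#[derive(Clone)]
struct DiskRow {
    // Hypothetical trimmed-down stand-in for DiskWidgetData.
    name: String,
    io_read: String,
    io_write: String,
}

struct DiskTable {
    rows: Vec<DiskRow>,
    force_update_data: bool,
}

impl DiskTable {
    fn set_table_data(&mut self, prebuilt: &[DiskRow]) {
        let mut rows = prebuilt.to_vec();
        // Stand-in for sorting by the currently selected column and order.
        rows.sort_by(|a, b| a.name.cmp(&b.name));
        self.rows = rows;
        self.force_update_data = false;
    }
}
```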
1 change: 1 addition & 0 deletions src/widgets/process_table.rs
@@ -405,6 +405,7 @@ impl ProcWidgetState {
}
};
self.table.set_data(data);
self.force_update_data = false;
}

fn get_tree_data(
1 change: 1 addition & 0 deletions src/widgets/temperature_table.rs
@@ -127,5 +127,6 @@ impl TempWidgetState {
column.sort_by(&mut data, self.table.order());
}
self.table.set_data(data);
self.force_update_data = false;
}
}
