diff --git a/src/app.rs b/src/app.rs
index 09efcf883..87e116642 100644
--- a/src/app.rs
+++ b/src/app.rs
@@ -156,28 +156,24 @@ impl App {
         for proc in self.states.proc_state.widget_states.values_mut() {
             if proc.force_update_data {
                 proc.set_table_data(data_source);
-                proc.force_update_data = false;
             }
         }
 
         for temp in self.states.temp_state.widget_states.values_mut() {
             if temp.force_update_data {
                 temp.set_table_data(&data_source.temp_data);
-                temp.force_update_data = false;
             }
         }
 
         for cpu in self.states.cpu_state.widget_states.values_mut() {
             if cpu.force_update_data {
                 cpu.set_legend_data(&data_source.cpu_harvest);
-                cpu.force_update_data = false;
             }
         }
 
         for disk in self.states.disk_state.widget_states.values_mut() {
             if disk.force_update_data {
-                disk.set_table_data(data_source); // FIXME: (points_rework_v1) do more work when eating data, not in set table data; maybe separate PR
-                disk.force_update_data = false;
+                disk.set_table_data(data_source);
             }
         }
     }
diff --git a/src/app/data/store.rs b/src/app/data/store.rs
index 52572b55b..caba51537 100644
--- a/src/app/data/store.rs
+++ b/src/app/data/store.rs
@@ -10,7 +10,7 @@ use crate::{
     collection::{cpu, disks, memory::MemHarvest, network, Data},
     dec_bytes_per_second_string,
     utils::data_units::DataUnit,
-    widgets::TempWidgetData,
+    widgets::{DiskWidgetData, TempWidgetData},
 };
 
 use super::{ProcessData, TimeSeriesData};
@@ -34,10 +34,9 @@ pub struct StoredData {
     pub cpu_harvest: cpu::CpuHarvest,
     pub load_avg_harvest: cpu::LoadAvgHarvest,
     pub process_data: ProcessData,
-    pub disk_harvest: Vec<disks::DiskHarvest>,
-    // TODO: The IO labels are kinda weird.
-    pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
-    pub io_labels: Vec<(String, String)>,
+    /// TODO: (points_rework_v1) Might be a better way to do this without having to store here?
+    pub prev_io: Vec<(u64, u64)>,
+    pub disk_harvest: Vec<DiskWidgetData>,
     pub temp_data: Vec<TempWidgetData>,
     #[cfg(feature = "battery")]
     pub battery_harvest: Vec,
@@ -56,9 +55,8 @@ impl Default for StoredData {
             cpu_harvest: cpu::CpuHarvest::default(),
             load_avg_harvest: cpu::LoadAvgHarvest::default(),
             process_data: Default::default(),
+            prev_io: Vec::default(),
             disk_harvest: Vec::default(),
-            io_labels_and_prev: Vec::default(),
-            io_labels: Vec::default(),
             temp_data: Vec::default(),
             #[cfg(feature = "battery")]
             battery_harvest: Vec::default(),
@@ -125,7 +123,6 @@ impl StoredData {
             self.load_avg_harvest = load_avg;
         }
 
-        // TODO: (points_rework_v1) the map might be redundant, the types are the same.
         self.temp_data = data
             .temperature_sensors
             .map(|sensors| {
@@ -169,8 +166,14 @@ impl StoredData {
             .duration_since(self.last_update_time)
             .as_secs_f64();
 
-        for (itx, device) in disks.iter().enumerate() {
-            let checked_name = {
+        self.disk_harvest.clear();
+
+        let prev_io_diff = disks.len().saturating_sub(self.prev_io.len());
+        self.prev_io.reserve(prev_io_diff);
+        self.prev_io.extend((0..prev_io_diff).map(|_| (0, 0)));
+
+        for (itx, device) in disks.into_iter().enumerate() {
+            let Some(checked_name) = ({
                 #[cfg(target_os = "windows")]
                 {
                     match &device.volume_name {
@@ -194,85 +197,73 @@ impl StoredData {
                         device.name.split('/').last()
                     }
                 }
+            }) else {
+                continue;
             };
 
-            if let Some(checked_name) = checked_name {
-                let io_device = {
-                    #[cfg(target_os = "macos")]
-                    {
-                        use std::sync::OnceLock;
-
-                        use regex::Regex;
-
-                        // Must trim one level further for macOS!
-                        static DISK_REGEX: OnceLock<Regex> = OnceLock::new();
-
-                        #[expect(
-                            clippy::regex_creation_in_loops,
-                            reason = "this is fine since it's done via a static OnceLock. In the future though, separate it out."
-                        )]
-                        if let Some(new_name) = DISK_REGEX
-                            .get_or_init(|| Regex::new(r"disk\d+").unwrap())
-                            .find(checked_name)
-                        {
-                            io.get(new_name.as_str())
-                        } else {
-                            None
-                        }
-                    }
-                    #[cfg(not(target_os = "macos"))]
-                    {
-                        io.get(checked_name)
-                    }
-                };
-
-                if let Some(io_device) = io_device {
-                    let (io_r_pt, io_w_pt) = if let Some(io) = io_device {
-                        (io.read_bytes, io.write_bytes)
-                    } else {
-                        (0, 0)
-                    };
-
-                    if self.io_labels.len() <= itx {
-                        self.io_labels.push((String::default(), String::default()));
-                    }
-
-                    if self.io_labels_and_prev.len() <= itx {
-                        self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
-                    }
-
-                    if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
-                        let r_rate = ((io_r_pt.saturating_sub(io_prev.0)) as f64
-                            / time_since_last_harvest)
-                            .round() as u64;
-                        let w_rate = ((io_w_pt.saturating_sub(io_prev.1)) as f64
-                            / time_since_last_harvest)
-                            .round() as u64;
-
-                        *io_curr = (r_rate, w_rate);
-                        *io_prev = (io_r_pt, io_w_pt);
-
-                        // TODO: idk why I'm generating this here tbh
-                        if let Some(io_labels) = self.io_labels.get_mut(itx) {
-                            *io_labels = (
-                                dec_bytes_per_second_string(r_rate),
-                                dec_bytes_per_second_string(w_rate),
-                            );
-                        }
-                    }
-                } else {
-                    if self.io_labels.len() <= itx {
-                        self.io_labels.push((String::default(), String::default()));
-                    }
-
-                    if let Some(io_labels) = self.io_labels.get_mut(itx) {
-                        *io_labels = ("N/A".to_string(), "N/A".to_string());
-                    }
-                }
-            }
-        }
-
-        self.disk_harvest = disks;
+            let io_device = {
+                #[cfg(target_os = "macos")]
+                {
+                    use std::sync::OnceLock;
+
+                    use regex::Regex;
+
+                    // Must trim one level further for macOS!
+                    static DISK_REGEX: OnceLock<Regex> = OnceLock::new();
+
+                    #[expect(
+                        clippy::regex_creation_in_loops,
+                        reason = "this is fine since it's done via a static OnceLock. In the future though, separate it out."
+                    )]
+                    if let Some(new_name) = DISK_REGEX
+                        .get_or_init(|| Regex::new(r"disk\d+").unwrap())
+                        .find(checked_name)
+                    {
+                        io.get(new_name.as_str())
+                    } else {
+                        None
+                    }
+                }
+                #[cfg(not(target_os = "macos"))]
+                {
+                    io.get(checked_name)
+                }
+            };
+
+            let (mut io_read, mut io_write) = ("N/A".to_string(), "N/A".to_string());
+            if let Some(Some(io_device)) = io_device {
+                if let Some(prev_io) = self.prev_io.get_mut(itx) {
+                    let r_rate = ((io_device.read_bytes.saturating_sub(prev_io.0)) as f64
+                        / time_since_last_harvest)
+                        .round() as u64;
+
+                    let w_rate = ((io_device.write_bytes.saturating_sub(prev_io.1)) as f64
+                        / time_since_last_harvest)
+                        .round() as u64;
+
+                    *prev_io = (io_device.read_bytes, io_device.write_bytes);
+
+                    io_read = dec_bytes_per_second_string(r_rate);
+                    io_write = dec_bytes_per_second_string(w_rate);
+                }
+            }
+
+            let summed_total_bytes = match (device.used_space, device.free_space) {
+                (Some(used), Some(free)) => Some(used + free),
+                _ => None,
+            };
+
+            self.disk_harvest.push(DiskWidgetData {
+                name: device.name,
+                mount_point: device.mount_point,
+                free_bytes: device.free_space,
+                used_bytes: device.used_space,
+                total_bytes: device.total_space,
+                summed_total_bytes,
+                io_read,
+                io_write,
+            });
+        }
     }
 }
 
diff --git a/src/canvas/components/time_graph/time_chart/points.rs b/src/canvas/components/time_graph/time_chart/points.rs
index eb96472c8..95ec8e852 100644
--- a/src/canvas/components/time_graph/time_chart/points.rs
+++ b/src/canvas/components/time_graph/time_chart/points.rs
@@ -42,13 +42,15 @@ impl TimeChart<'_> {
         // TODO: (points_rework_v1) Can we instead modify the range so it's based on the epoch
         // rather than having to convert?
         // TODO: (points_rework_v1) Is this efficient? Or should I prune using take_while first?
-        // TODO: (points_rework_v1) Should this be generic over dataset.graph_type?
         for (curr, next) in values
             .iter_along_base(times)
             .rev()
             .map(|(&time, &val)| {
                 let from_start: f64 =
                     (current_time.duration_since(time).as_millis() as f64).floor();
+
+                // XXX: Should this be generic over dataset.graph_type instead? That would allow us to move
+                // transformations behind a type - however, that also means that there's some complexity added.
                 (-from_start, self.scaling.scale(val))
             })
             .tuple_windows()
diff --git a/src/canvas/widgets/mem_basic.rs b/src/canvas/widgets/mem_basic.rs
index 8cfd14b09..bc9024346 100644
--- a/src/canvas/widgets/mem_basic.rs
+++ b/src/canvas/widgets/mem_basic.rs
@@ -74,7 +74,6 @@ impl Painter {
                 .gauge_style(self.styles.ram_style),
         );
 
-        // FIXME: (points_rework_v1) Change all of these to get the last point instead
         if let Some(swap_harvest) = &data.swap_harvest {
             let swap_percentage = swap_harvest.saturating_percentage();
             let swap_label = memory_label(swap_harvest, app_state.basic_mode_use_percent);
diff --git a/src/widgets/cpu_graph.rs b/src/widgets/cpu_graph.rs
index 52854bf7e..a0fc8edb6 100644
--- a/src/widgets/cpu_graph.rs
+++ b/src/widgets/cpu_graph.rs
@@ -178,5 +178,6 @@ impl CpuWidgetState {
                 .chain(data.iter().map(CpuWidgetTableData::from_cpu_data))
                 .collect(),
         );
+        self.force_update_data = false;
     }
 }
diff --git a/src/widgets/disk_table.rs b/src/widgets/disk_table.rs
index b65dfd9a2..c45942ef3 100644
--- a/src/widgets/disk_table.rs
+++ b/src/widgets/disk_table.rs
@@ -314,36 +314,14 @@ impl DiskTableWidget {
     }
 
     /// Update the current table data.
-    ///
-    /// TODO: Move the conversion step into the eating part.
     pub fn set_table_data(&mut self, data: &StoredData) {
-        let mut data = data
-            .disk_harvest
-            .iter()
-            .zip(&data.io_labels)
-            .map(|(disk, (io_read, io_write))| {
-                let summed_total_bytes = match (disk.used_space, disk.free_space) {
-                    (Some(used), Some(free)) => Some(used + free),
-                    _ => None,
-                };
-
-                DiskWidgetData {
-                    name: disk.name.to_string(),
-                    mount_point: disk.mount_point.to_string(),
-                    free_bytes: disk.free_space,
-                    used_bytes: disk.used_space,
-                    total_bytes: disk.total_space,
-                    summed_total_bytes,
-                    io_read: io_read.to_string(),
-                    io_write: io_write.to_string(),
-                }
-            })
-            .collect::<Vec<_>>();
+        let mut data = data.disk_harvest.clone();
 
         if let Some(column) = self.table.columns.get(self.table.sort_index()) {
             column.sort_by(&mut data, self.table.order());
         }
         self.table.set_data(data);
+        self.force_update_data = false;
     }
 
     pub fn set_index(&mut self, index: usize) {
diff --git a/src/widgets/process_table.rs b/src/widgets/process_table.rs
index d1ec47e91..d930f3ea4 100644
--- a/src/widgets/process_table.rs
+++ b/src/widgets/process_table.rs
@@ -405,6 +405,7 @@ impl ProcWidgetState {
             }
         };
         self.table.set_data(data);
+        self.force_update_data = false;
     }
 
     fn get_tree_data(
diff --git a/src/widgets/temperature_table.rs b/src/widgets/temperature_table.rs
index 83af383a7..d9a8e4a7e 100644
--- a/src/widgets/temperature_table.rs
+++ b/src/widgets/temperature_table.rs
@@ -127,5 +127,6 @@ impl TempWidgetState {
             column.sort_by(&mut data, self.table.order());
         }
         self.table.set_data(data);
+        self.force_update_data = false;
     }
 }
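A note on the change above, outside the diff itself: the disk I/O rate math now lives in the data store rather than in `DiskTableWidget::set_table_data`. Only the previous cumulative read/write counters are kept per disk (`prev_io`), and each refresh derives a bytes-per-second value from the counter delta and the elapsed time. A minimal standalone sketch of that pattern, assuming nothing beyond the standard library (`io_rate` is a hypothetical helper for illustration, not part of bottom's API):

```rust
/// Derive a per-second rate from two cumulative byte counters and the elapsed
/// time between samples, saturating if the counter ever goes backwards.
fn io_rate(prev_bytes: u64, curr_bytes: u64, elapsed_secs: f64) -> u64 {
    if elapsed_secs <= 0.0 {
        return 0;
    }
    (curr_bytes.saturating_sub(prev_bytes) as f64 / elapsed_secs).round() as u64
}

fn main() {
    // Two samples taken 2 seconds apart, with 1 MiB read in between.
    let prev: u64 = 10 * 1024 * 1024;
    let curr: u64 = prev + 1024 * 1024;
    let rate = io_rate(prev, curr, 2.0);
    assert_eq!(rate, 524_288); // ~512 KiB/s
    println!("{rate} B/s");
}
```

In the diff, `dec_bytes_per_second_string` formats the resulting rate for display, and newly seen disks get a `(0, 0)` baseline appended to `prev_io` before the per-disk loop runs.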