From 134888dfecb2750a34dad5304bbcd5c2ea4b9b77 Mon Sep 17 00:00:00 2001
From: Clement Tsang <34804052+ClementTsang@users.noreply.github.com>
Date: Sat, 10 May 2025 18:24:06 -0400
Subject: [PATCH] refactor: refactor some process grouping code (#1727)

Remove one clone for process grouping.
---
 src/app/data/store.rs                     |  2 +-
 src/collection/processes.rs               | 19 --------
 src/lib.rs                                |  9 ++--
 src/widgets/process_table.rs              | 59 +++++++++++++++--------
 src/widgets/process_table/process_data.rs |  5 --
 5 files changed, 43 insertions(+), 51 deletions(-)

diff --git a/src/app/data/store.rs b/src/app/data/store.rs
index 8bba227b..c9bbccab 100644
--- a/src/app/data/store.rs
+++ b/src/app/data/store.rs
@@ -19,7 +19,7 @@ use crate::{
 /// TODO: Maybe reduce visibility of internal data, make it only accessible through DataStore?
 #[derive(Debug, Clone)]
 pub struct StoredData {
-    pub last_update_time: Instant, // FIXME: (points_rework_v1) remove this?
+    pub last_update_time: Instant, // FIXME: (points_rework_v1) we could be able to remove this with some more refactoring.
     pub timeseries_data: TimeSeriesData,
     pub network_harvest: network::NetworkHarvest,
     pub ram_harvest: Option,
diff --git a/src/collection/processes.rs b/src/collection/processes.rs
index a4250cee..1431438e 100644
--- a/src/collection/processes.rs
+++ b/src/collection/processes.rs
@@ -112,25 +112,6 @@ pub struct ProcessHarvest {
     // pub virt_kb: u64,
 }
 
-impl ProcessHarvest {
-    pub(crate) fn add(&mut self, rhs: &ProcessHarvest) {
-        self.cpu_usage_percent += rhs.cpu_usage_percent;
-        self.mem_usage_bytes += rhs.mem_usage_bytes;
-        self.mem_usage_percent += rhs.mem_usage_percent;
-        self.read_bytes_per_sec += rhs.read_bytes_per_sec;
-        self.write_bytes_per_sec += rhs.write_bytes_per_sec;
-        self.total_read_bytes += rhs.total_read_bytes;
-        self.total_write_bytes += rhs.total_write_bytes;
-        self.time = self.time.max(rhs.time);
-        #[cfg(feature = "gpu")]
-        {
-            self.gpu_mem += rhs.gpu_mem;
-            self.gpu_util += rhs.gpu_util;
-            self.gpu_mem_percent += rhs.gpu_mem_percent;
-        }
-    }
-}
-
 impl DataCollector {
     pub(crate) fn get_processes(&mut self) -> CollectionResult<Vec<ProcessHarvest>> {
         cfg_if! {
diff --git a/src/lib.rs b/src/lib.rs
index 5586c27c..93f985ae 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -270,7 +270,7 @@ fn create_collection_thread(
     })
 }
 
-/// Main code to call.
+/// Main code to call to start bottom.
 #[inline]
 pub fn start_bottom(enable_error_hook: &mut bool) -> anyhow::Result<()> {
     // let _profiler = dhat::Profiler::new_heap();
@@ -367,8 +367,9 @@ pub fn start_bottom(enable_error_hook: &mut bool) -> anyhow::Result<()> {
     panic::set_hook(Box::new(panic_hook));
 
     // Set termination hook
-    // TODO: On UNIX, use signal-hook to handle cleanup as well.
     ctrlc::set_handler(move || {
+        // TODO: Consider using signal-hook (https://github.com/vorner/signal-hook) to handle
+        // more types of signals?
         let _ = sender.send(BottomEvent::Terminate);
     })?;
 
@@ -381,9 +382,7 @@ pub fn start_bottom(enable_error_hook: &mut bool) -> anyhow::Result<()> {
     loop {
         if let Ok(recv) = receiver.recv() {
             match recv {
-                BottomEvent::Terminate => {
-                    break;
-                }
+                BottomEvent::Terminate => break,
                 BottomEvent::Resize => {
                     try_drawing(&mut terminal, &mut app, &mut painter)?;
                 }
diff --git a/src/widgets/process_table.rs b/src/widgets/process_table.rs
index 919ef3f2..7ba88d3a 100644
--- a/src/widgets/process_table.rs
+++ b/src/widgets/process_table.rs
@@ -676,7 +676,8 @@ impl ProcWidgetState {
         let mut id_pid_map: HashMap<String, Vec<Pid>> = HashMap::default();
 
         let mut filtered_data: Vec<ProcWidgetData> = if let ProcWidgetMode::Grouped = self.mode {
-            let mut id_process_mapping: HashMap<&String, ProcessHarvest> = HashMap::default();
+            let mut id_process_mapping: HashMap<&String, ProcWidgetData> = HashMap::default();
+
             for process in filtered_iter {
                 let id = if is_using_command {
                     &process.command
@@ -691,30 +692,46 @@ impl ProcWidgetState {
                     id_pid_map.insert(id.clone(), vec![pid]);
                 }
 
-                if let Some(grouped_process_harvest) = id_process_mapping.get_mut(id) {
-                    grouped_process_harvest.add(process);
+                if let Some(pwd) = id_process_mapping.get_mut(id) {
+                    pwd.cpu_usage_percent += process.cpu_usage_percent;
+
+                    match &mut pwd.mem_usage {
+                        MemUsage::Percent(usage) => {
+                            *usage += process.mem_usage_percent;
+                        }
+                        MemUsage::Bytes(usage) => {
+                            *usage += process.mem_usage_bytes;
+                        }
+                    }
+
+                    pwd.rps += process.read_bytes_per_sec;
+                    pwd.wps += process.write_bytes_per_sec;
+                    pwd.total_read += process.total_read_bytes;
+                    pwd.total_write += process.total_write_bytes;
+                    pwd.time = pwd.time.max(process.time);
+                    #[cfg(feature = "gpu")]
+                    {
+                        pwd.gpu_usage += process.gpu_util;
+                        match &mut pwd.gpu_mem_usage {
+                            MemUsage::Percent(usage) => {
+                                *usage += process.gpu_mem_percent;
+                            }
+                            MemUsage::Bytes(usage) => {
+                                *usage += process.gpu_mem;
+                            }
+                        }
+                    }
+
+                    pwd.num_similar += 1;
                 } else {
-                    // FIXME: [PERF] could maybe eliminate an allocation here in the grouped mode...
-                    // or maybe just avoid the entire transformation step, making an alloc fine.
-                    id_process_mapping.insert(id, process.clone());
+                    id_process_mapping.insert(
+                        id,
+                        ProcWidgetData::from_data(process, is_using_command, is_mem_percent),
+                    );
                 }
             }
 
-            id_process_mapping
-                .values()
-                .map(|process| {
-                    let id = if is_using_command {
-                        &process.command
-                    } else {
-                        &process.name
-                    };
-
-                    let num_similar = id_pid_map.get(id).map(|val| val.len()).unwrap_or(1) as u64;
-
-                    ProcWidgetData::from_data(process, is_using_command, is_mem_percent)
-                        .num_similar(num_similar)
-                })
-                .collect()
+            id_process_mapping.into_values().collect()
         } else {
             filtered_iter
                 .map(|process| ProcWidgetData::from_data(process, is_using_command, is_mem_percent))
diff --git a/src/widgets/process_table/process_data.rs b/src/widgets/process_table/process_data.rs
index bc1373cc..bd5ff783 100644
--- a/src/widgets/process_table/process_data.rs
+++ b/src/widgets/process_table/process_data.rs
@@ -259,11 +259,6 @@ impl ProcWidgetData {
         }
     }
 
-    pub fn num_similar(mut self, num_similar: u64) -> Self {
-        self.num_similar = num_similar;
-        self
-    }
-
     pub fn disabled(mut self, disabled: bool) -> Self {
         self.disabled = disabled;
         self