From 5cc8a08376177ebc331c5a4eb371987b383d5da9 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 19 Jan 2020 20:57:05 -0500 Subject: [PATCH 01/26] Only generate regexes during regex mode --- src/app.rs | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/src/app.rs b/src/app.rs index 99d4641b..244f3872 100644 --- a/src/app.rs +++ b/src/app.rs @@ -233,6 +233,17 @@ impl App { if !self.is_in_dialog() && self.is_searching() { if let ApplicationPosition::ProcessSearch = self.current_application_position { self.use_simple = !self.use_simple; + + // Update to latest (when simple is on this is not updated) + if !self.use_simple { + self.current_regex = if self.current_search_query.is_empty() { + BASE_REGEX.clone() + } else { + regex::Regex::new(&(self.current_search_query)) + }; + } + + // Force update to process display in GUI self.update_process_gui = true; } } @@ -266,12 +277,13 @@ impl App { self.current_search_query .remove(self.current_cursor_position); - // TODO: [OPT] this runs even while in simple... consider making this only run if they toggle back to regex! - self.current_regex = if self.current_search_query.is_empty() { - BASE_REGEX.clone() - } else { - regex::Regex::new(&(self.current_search_query)) - }; + if !self.use_simple { + self.current_regex = if self.current_search_query.is_empty() { + BASE_REGEX.clone() + } else { + regex::Regex::new(&(self.current_search_query)) + }; + } self.update_process_gui = true; } } @@ -352,12 +364,13 @@ impl App { .insert(self.current_cursor_position, caught_char); self.current_cursor_position += 1; - // TODO: [OPT] this runs even while in simple... consider making this only run if they toggle back to regex! 
- self.current_regex = if self.current_search_query.is_empty() { - BASE_REGEX.clone() - } else { - regex::Regex::new(&(self.current_search_query)) - }; + if !self.use_simple { + self.current_regex = if self.current_search_query.is_empty() { + BASE_REGEX.clone() + } else { + regex::Regex::new(&(self.current_search_query)) + }; + } self.update_process_gui = true; } else { match caught_char { From f04ff034f4b3084b11811331acb9ee5fa5724b5f Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 19 Jan 2020 22:35:05 -0500 Subject: [PATCH 02/26] Updated cargo and .gitignore --- .gitignore | 5 ++++- Cargo.toml | 10 +++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index a154bca7..61746d1c 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,7 @@ Cargo.lock **/*.rs.bk *.log -.vscode \ No newline at end of file +.vscode +rust-unmangle +*.svg +*.data \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 316e8ffc..979e1183 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,17 +24,17 @@ chrono = "0.4.10" clap = "2.33.0" crossterm = "0.14" failure = "0.1.6" -fern = "0.5" +fern = "0.5.9" futures-timer = "2.0.2" futures = "0.3.1" heim = "0.0.9" -log = "0.4" +log = "0.4.8" regex = "1.3.3" -sysinfo = "0.9" #0.9 seems to be the last working version for my Ryzen PC... +sysinfo = "0.9.6" #0.9 seems to be the last working version for my Ryzen PC... tokio = "0.2.9" -winapi = "0.3" +winapi = "0.3.8" tui = {version = "0.8", features = ["crossterm"], default-features = false } -lazy_static = "1.4" +lazy_static = "1.4.0" [dev-dependencies] assert_cmd = "0.12" From e356b94867a1e3494e19a974e102da5202b87242 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Mon, 20 Jan 2020 01:28:30 -0500 Subject: [PATCH 03/26] Slightly optimized how networking is... I think. 
--- src/app/data_collection.rs | 89 ++++++++++++++++--------- src/app/data_collection/network.rs | 82 +++++++++++++++-------- src/data_conversion.rs | 100 +++++++++++++---------------- src/main.rs | 2 +- 4 files changed, 157 insertions(+), 116 deletions(-) diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs index 54a76bb4..5042c6d6 100644 --- a/src/app/data_collection.rs +++ b/src/app/data_collection.rs @@ -23,7 +23,7 @@ fn push_if_valid(result: &Result, vector_to_push: &mut } } -#[derive(Debug, Default, Clone)] +#[derive(Clone, Debug, Default)] pub struct Data { pub list_of_cpu_packages: Vec, pub list_of_io: Vec, @@ -31,10 +31,10 @@ pub struct Data { pub memory: Vec, pub swap: Vec, pub list_of_temperature_sensor: Vec, - pub network: Vec, + pub network: network::NetworkStorage, pub list_of_processes: Vec, pub grouped_list_of_processes: Option>, - pub list_of_disks: Vec, // Only need to keep a list of disks and their data + pub list_of_disks: Vec, } pub struct DataState { @@ -45,9 +45,6 @@ pub struct DataState { prev_pid_stats: HashMap, prev_idle: f64, prev_non_idle: f64, - prev_net_rx_bytes: u64, - prev_net_tx_bytes: u64, - prev_net_access_time: Instant, temperature_type: temperature::TemperatureType, last_clean: Instant, // Last time stale data was cleared use_current_cpu_total: bool, @@ -63,9 +60,6 @@ impl Default for DataState { prev_pid_stats: HashMap::new(), prev_idle: 0_f64, prev_non_idle: 0_f64, - prev_net_rx_bytes: 0, - prev_net_tx_bytes: 0, - prev_net_access_time: Instant::now(), temperature_type: temperature::TemperatureType::Celsius, last_clean: Instant::now(), use_current_cpu_total: false, @@ -97,18 +91,61 @@ impl DataState { let current_instant = std::time::Instant::now(); + // Network + let new_network_data = network::get_network_data( + &self.sys, + &self.data.network.last_collection_time, + &mut self.data.network.total_rx, + &mut self.data.network.total_tx, + ¤t_instant, + ) + .await; + + let joining_points: Option> = + if 
!self.data.network.data_points.is_empty() { + if let Some(prev_data) = self + .data + .network + .data_points + .get(&self.data.network.last_collection_time) + { + // If not empty, inject joining points + + let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64; + let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64; + let time_gap = current_instant + .duration_since(self.data.network.last_collection_time) + .as_millis() as f64; + + let mut new_joining_points = Vec::new(); + + for idx in 0..100 { + new_joining_points.push(network::NetworkJoinPoint { + rx: prev_data.0.rx as f64 + rx_diff / 100.0 * idx as f64, + tx: prev_data.0.tx as f64 + tx_diff / 100.0 * idx as f64, + time_offset_milliseconds: time_gap / 100.0 * (100 - idx) as f64, + }); + } + Some(new_joining_points) + } else { + None + } + } else { + None + }; + + // Set values + self.data.network.rx = new_network_data.rx; + self.data.network.tx = new_network_data.tx; + self.data.network.last_collection_time = current_instant; + + // Add new point + self.data + .network + .data_points + .insert(current_instant, (new_network_data, joining_points)); + // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! 
- push_if_valid( - &network::get_network_data( - &self.sys, - &mut self.prev_net_rx_bytes, - &mut self.prev_net_tx_bytes, - &mut self.prev_net_access_time, - ¤t_instant, - ) - .await, - &mut self.data.network, - ); push_if_valid( &cpu::get_cpu_data_list(&self.sys, ¤t_instant), &mut self.data.list_of_cpu_packages, @@ -167,6 +204,8 @@ impl DataState { self.prev_pid_stats.remove(&stale); } + // TODO: [OPT] cleaning stale network + self.data.list_of_cpu_packages = self .data .list_of_cpu_packages @@ -197,16 +236,6 @@ impl DataState { }) .collect::>(); - self.data.network = self - .data - .network - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - self.data.list_of_io = self .data .list_of_io diff --git a/src/app/data_collection/network.rs b/src/app/data_collection/network.rs index 1ba7c90f..c9b97de6 100644 --- a/src/app/data_collection/network.rs +++ b/src/app/data_collection/network.rs @@ -1,34 +1,58 @@ use futures::StreamExt; use heim::net; use heim::units::information::byte; +use std::collections::BTreeMap; use std::time::Instant; use sysinfo::{NetworkExt, System, SystemExt}; -#[derive(Debug, Clone)] -/// Note all values are in bytes... -pub struct NetworkData { +#[derive(Clone, Debug)] +pub struct NetworkJoinPoint { + pub rx: f64, + pub tx: f64, + pub time_offset_milliseconds: f64, +} + +#[derive(Clone, Debug)] +pub struct NetworkStorage { + pub data_points: BTreeMap>)>, pub rx: u64, pub tx: u64, pub total_rx: u64, pub total_tx: u64, - pub instant: Instant, + pub last_collection_time: Instant, +} + +impl Default for NetworkStorage { + fn default() -> Self { + NetworkStorage { + data_points: BTreeMap::default(), + rx: 0, + tx: 0, + total_rx: 0, + total_tx: 0, + last_collection_time: Instant::now(), + } + } +} + +#[derive(Clone, Debug)] +/// Note all values are in bytes... 
+pub struct NetworkData { + pub rx: u64, + pub tx: u64, } pub async fn get_network_data( - sys: &System, prev_net_rx_bytes: &mut u64, prev_net_tx_bytes: &mut u64, - prev_net_access_time: &mut Instant, curr_time: &Instant, -) -> crate::utils::error::Result { + sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64, + curr_time: &Instant, +) -> NetworkData { + // FIXME: [WIN] Track current total bytes... also is this accurate? if cfg!(target_os = "windows") { let network_data = sys.get_network(); - - *prev_net_access_time = *curr_time; - Ok(NetworkData { + NetworkData { rx: network_data.get_income(), tx: network_data.get_outcome(), - total_rx: 0, - total_tx: 0, - instant: *prev_net_access_time, - }) + } } else { let mut io_data = net::io_counters(); let mut net_rx: u64 = 0; @@ -40,21 +64,23 @@ pub async fn get_network_data( net_tx += io.bytes_sent().get::(); } } - let cur_time = Instant::now(); - let elapsed_time = cur_time.duration_since(*prev_net_access_time).as_secs_f64(); + let elapsed_time = curr_time + .duration_since(*prev_net_access_time) + .as_secs_f64(); - let rx = ((net_rx - *prev_net_rx_bytes) as f64 / elapsed_time) as u64; - let tx = ((net_tx - *prev_net_tx_bytes) as f64 / elapsed_time) as u64; + if *prev_net_rx == 0 { + *prev_net_rx = net_rx; + } - *prev_net_rx_bytes = net_rx; - *prev_net_tx_bytes = net_tx; - *prev_net_access_time = cur_time; - Ok(NetworkData { - rx, - tx, - total_rx: *prev_net_rx_bytes, - total_tx: *prev_net_tx_bytes, - instant: *prev_net_access_time, - }) + if *prev_net_tx == 0 { + *prev_net_tx = net_tx; + } + + let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64; + let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64; + + *prev_net_rx = net_rx; + *prev_net_tx = net_tx; + NetworkData { rx, tx } } } diff --git a/src/data_conversion.rs b/src/data_conversion.rs index 5def8bf0..58322d97 100644 --- a/src/data_conversion.rs +++ b/src/data_conversion.rs @@ -399,64 +399,60 @@ pub fn 
update_network_data_points(app_data: &data_collection::Data) -> Converted } pub fn convert_network_data_points( - network_data: &[data_collection::network::NetworkData], + network_data: &data_collection::network::NetworkStorage, ) -> ConvertedNetworkData { let mut rx: Vec<(f64, f64)> = Vec::new(); let mut tx: Vec<(f64, f64)> = Vec::new(); - for data in network_data { - let current_time = std::time::Instant::now(); + let current_time = network_data.last_collection_time; + for (time, data) in &network_data.data_points { + let time_from_start: f64 = ((TIME_STARTS_FROM as f64 + - current_time.duration_since(*time).as_millis() as f64) + * 10_f64) + .floor(); + + // Insert in joiner points + if let Some(joiners) = &data.1 { + for joiner in joiners { + let offset_time = time_from_start - joiner.time_offset_milliseconds as f64 * 10_f64; + rx.push(( + offset_time, + if joiner.rx > 0.0 { + (joiner.rx).log(2.0) + } else { + 0.0 + }, + )); + + tx.push(( + offset_time, + if joiner.tx > 0.0 { + (joiner.tx).log(2.0) + } else { + 0.0 + }, + )); + } + } + + // Insert in main points let rx_data = ( - ((TIME_STARTS_FROM as f64 - - current_time.duration_since(data.instant).as_millis() as f64) - * 10_f64) - .floor(), - if data.rx > 0 { - (data.rx as f64).log(2.0) + time_from_start, + if data.0.rx > 0 { + (data.0.rx as f64).log(2.0) } else { 0.0 }, ); let tx_data = ( - ((TIME_STARTS_FROM as f64 - - current_time.duration_since(data.instant).as_millis() as f64) - * 10_f64) - .floor(), - if data.tx > 0 { - (data.tx as f64).log(2.0) + time_from_start, + if data.0.tx > 0 { + (data.0.tx as f64).log(2.0) } else { 0.0 }, ); - //debug!("Plotting: {:?} bytes rx, {:?} bytes tx", rx_data, tx_data); - - // Now, inject our joining points... 
- if !rx.is_empty() { - let previous_element_data = *(rx.last().unwrap()); - for idx in 0..50 { - rx.push(( - previous_element_data.0 - + ((rx_data.0 - previous_element_data.0) / 50.0 * f64::from(idx)), - previous_element_data.1 - + ((rx_data.1 - previous_element_data.1) / 50.0 * f64::from(idx)), - )); - } - } - - // Now, inject our joining points... - if !tx.is_empty() { - let previous_element_data = *(tx.last().unwrap()); - for idx in 0..50 { - tx.push(( - previous_element_data.0 - + ((tx_data.0 - previous_element_data.0) / 50.0 * f64::from(idx)), - previous_element_data.1 - + ((tx_data.1 - previous_element_data.1) / 50.0 * f64::from(idx)), - )); - } - } - rx.push(rx_data); tx.push(tx_data); } @@ -466,13 +462,8 @@ pub fn convert_network_data_points( let total_tx_converted_result: (f64, String); let tx_converted_result: (f64, String); - if let Some(last_num_bytes_entry) = network_data.last() { - rx_converted_result = get_exact_byte_values(last_num_bytes_entry.rx, false); - total_rx_converted_result = get_exact_byte_values(last_num_bytes_entry.total_rx, false) - } else { - rx_converted_result = get_exact_byte_values(0, false); - total_rx_converted_result = get_exact_byte_values(0, false); - } + rx_converted_result = get_exact_byte_values(network_data.rx, false); + total_rx_converted_result = get_exact_byte_values(network_data.total_rx, false); let rx_display = format!("{:.*}{}", 1, rx_converted_result.0, rx_converted_result.1); let total_rx_display = if cfg!(not(target_os = "windows")) { format!( @@ -483,13 +474,8 @@ pub fn convert_network_data_points( "N/A".to_string() }; - if let Some(last_num_bytes_entry) = network_data.last() { - tx_converted_result = get_exact_byte_values(last_num_bytes_entry.tx, false); - total_tx_converted_result = get_exact_byte_values(last_num_bytes_entry.total_tx, false); - } else { - tx_converted_result = get_exact_byte_values(0, false); - total_tx_converted_result = get_exact_byte_values(0, false); - } + tx_converted_result = 
get_exact_byte_values(network_data.tx, false); + total_tx_converted_result = get_exact_byte_values(network_data.total_tx, false); let tx_display = format!("{:.*}{}", 1, tx_converted_result.0, tx_converted_result.1); let total_tx_display = if cfg!(not(target_os = "windows")) { format!( diff --git a/src/main.rs b/src/main.rs index 47b55fb6..043ddcf5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -199,7 +199,7 @@ fn main() -> error::Result<()> { } futures::executor::block_on(data_state.update_data()); tx.send(Event::Update(Box::from(data_state.data.clone()))) - .unwrap(); + .unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it if first_run { // Fix for if you set a really long time for update periods (and just gives a faster first value) From c0df2e6c334c0885c112187320172e6cd10b51b7 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 19 Jan 2020 20:57:05 -0500 Subject: [PATCH 04/26] Only generate regexes during regex mode --- src/app.rs | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/src/app.rs b/src/app.rs index 99d4641b..244f3872 100644 --- a/src/app.rs +++ b/src/app.rs @@ -233,6 +233,17 @@ impl App { if !self.is_in_dialog() && self.is_searching() { if let ApplicationPosition::ProcessSearch = self.current_application_position { self.use_simple = !self.use_simple; + + // Update to latest (when simple is on this is not updated) + if !self.use_simple { + self.current_regex = if self.current_search_query.is_empty() { + BASE_REGEX.clone() + } else { + regex::Regex::new(&(self.current_search_query)) + }; + } + + // Force update to process display in GUI self.update_process_gui = true; } } @@ -266,12 +277,13 @@ impl App { self.current_search_query .remove(self.current_cursor_position); - // TODO: [OPT] this runs even while in simple... consider making this only run if they toggle back to regex! 
- self.current_regex = if self.current_search_query.is_empty() { - BASE_REGEX.clone() - } else { - regex::Regex::new(&(self.current_search_query)) - }; + if !self.use_simple { + self.current_regex = if self.current_search_query.is_empty() { + BASE_REGEX.clone() + } else { + regex::Regex::new(&(self.current_search_query)) + }; + } self.update_process_gui = true; } } @@ -352,12 +364,13 @@ impl App { .insert(self.current_cursor_position, caught_char); self.current_cursor_position += 1; - // TODO: [OPT] this runs even while in simple... consider making this only run if they toggle back to regex! - self.current_regex = if self.current_search_query.is_empty() { - BASE_REGEX.clone() - } else { - regex::Regex::new(&(self.current_search_query)) - }; + if !self.use_simple { + self.current_regex = if self.current_search_query.is_empty() { + BASE_REGEX.clone() + } else { + regex::Regex::new(&(self.current_search_query)) + }; + } self.update_process_gui = true; } else { match caught_char { From ae6e27d25a1d20419aed75a2b9165858a29eda92 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 19 Jan 2020 22:35:05 -0500 Subject: [PATCH 05/26] Updated cargo and .gitignore --- .gitignore | 5 ++++- Cargo.toml | 10 +++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index a154bca7..61746d1c 100644 --- a/.gitignore +++ b/.gitignore @@ -10,4 +10,7 @@ Cargo.lock **/*.rs.bk *.log -.vscode \ No newline at end of file +.vscode +rust-unmangle +*.svg +*.data \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 316e8ffc..979e1183 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,17 +24,17 @@ chrono = "0.4.10" clap = "2.33.0" crossterm = "0.14" failure = "0.1.6" -fern = "0.5" +fern = "0.5.9" futures-timer = "2.0.2" futures = "0.3.1" heim = "0.0.9" -log = "0.4" +log = "0.4.8" regex = "1.3.3" -sysinfo = "0.9" #0.9 seems to be the last working version for my Ryzen PC... 
+sysinfo = "0.9.6" #0.9 seems to be the last working version for my Ryzen PC... tokio = "0.2.9" -winapi = "0.3" +winapi = "0.3.8" tui = {version = "0.8", features = ["crossterm"], default-features = false } -lazy_static = "1.4" +lazy_static = "1.4.0" [dev-dependencies] assert_cmd = "0.12" From 840b0cccc8549efd7ffb6ddde8bb5d2319fe6665 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Mon, 20 Jan 2020 01:28:30 -0500 Subject: [PATCH 06/26] Slightly optimized how networking is... I think. --- src/app/data_collection.rs | 89 ++++++++++++++++--------- src/app/data_collection/network.rs | 82 +++++++++++++++-------- src/data_conversion.rs | 100 +++++++++++++---------------- src/main.rs | 2 +- 4 files changed, 157 insertions(+), 116 deletions(-) diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs index 54a76bb4..5042c6d6 100644 --- a/src/app/data_collection.rs +++ b/src/app/data_collection.rs @@ -23,7 +23,7 @@ fn push_if_valid(result: &Result, vector_to_push: &mut } } -#[derive(Debug, Default, Clone)] +#[derive(Clone, Debug, Default)] pub struct Data { pub list_of_cpu_packages: Vec, pub list_of_io: Vec, @@ -31,10 +31,10 @@ pub struct Data { pub memory: Vec, pub swap: Vec, pub list_of_temperature_sensor: Vec, - pub network: Vec, + pub network: network::NetworkStorage, pub list_of_processes: Vec, pub grouped_list_of_processes: Option>, - pub list_of_disks: Vec, // Only need to keep a list of disks and their data + pub list_of_disks: Vec, } pub struct DataState { @@ -45,9 +45,6 @@ pub struct DataState { prev_pid_stats: HashMap, prev_idle: f64, prev_non_idle: f64, - prev_net_rx_bytes: u64, - prev_net_tx_bytes: u64, - prev_net_access_time: Instant, temperature_type: temperature::TemperatureType, last_clean: Instant, // Last time stale data was cleared use_current_cpu_total: bool, @@ -63,9 +60,6 @@ impl Default for DataState { prev_pid_stats: HashMap::new(), prev_idle: 0_f64, prev_non_idle: 0_f64, - prev_net_rx_bytes: 0, - prev_net_tx_bytes: 0, - 
prev_net_access_time: Instant::now(), temperature_type: temperature::TemperatureType::Celsius, last_clean: Instant::now(), use_current_cpu_total: false, @@ -97,18 +91,61 @@ impl DataState { let current_instant = std::time::Instant::now(); + // Network + let new_network_data = network::get_network_data( + &self.sys, + &self.data.network.last_collection_time, + &mut self.data.network.total_rx, + &mut self.data.network.total_tx, + ¤t_instant, + ) + .await; + + let joining_points: Option> = + if !self.data.network.data_points.is_empty() { + if let Some(prev_data) = self + .data + .network + .data_points + .get(&self.data.network.last_collection_time) + { + // If not empty, inject joining points + + let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64; + let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64; + let time_gap = current_instant + .duration_since(self.data.network.last_collection_time) + .as_millis() as f64; + + let mut new_joining_points = Vec::new(); + + for idx in 0..100 { + new_joining_points.push(network::NetworkJoinPoint { + rx: prev_data.0.rx as f64 + rx_diff / 100.0 * idx as f64, + tx: prev_data.0.tx as f64 + tx_diff / 100.0 * idx as f64, + time_offset_milliseconds: time_gap / 100.0 * (100 - idx) as f64, + }); + } + Some(new_joining_points) + } else { + None + } + } else { + None + }; + + // Set values + self.data.network.rx = new_network_data.rx; + self.data.network.tx = new_network_data.tx; + self.data.network.last_collection_time = current_instant; + + // Add new point + self.data + .network + .data_points + .insert(current_instant, (new_network_data, joining_points)); + // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! 
- push_if_valid( - &network::get_network_data( - &self.sys, - &mut self.prev_net_rx_bytes, - &mut self.prev_net_tx_bytes, - &mut self.prev_net_access_time, - ¤t_instant, - ) - .await, - &mut self.data.network, - ); push_if_valid( &cpu::get_cpu_data_list(&self.sys, ¤t_instant), &mut self.data.list_of_cpu_packages, @@ -167,6 +204,8 @@ impl DataState { self.prev_pid_stats.remove(&stale); } + // TODO: [OPT] cleaning stale network + self.data.list_of_cpu_packages = self .data .list_of_cpu_packages @@ -197,16 +236,6 @@ impl DataState { }) .collect::>(); - self.data.network = self - .data - .network - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - self.data.list_of_io = self .data .list_of_io diff --git a/src/app/data_collection/network.rs b/src/app/data_collection/network.rs index 1ba7c90f..c9b97de6 100644 --- a/src/app/data_collection/network.rs +++ b/src/app/data_collection/network.rs @@ -1,34 +1,58 @@ use futures::StreamExt; use heim::net; use heim::units::information::byte; +use std::collections::BTreeMap; use std::time::Instant; use sysinfo::{NetworkExt, System, SystemExt}; -#[derive(Debug, Clone)] -/// Note all values are in bytes... -pub struct NetworkData { +#[derive(Clone, Debug)] +pub struct NetworkJoinPoint { + pub rx: f64, + pub tx: f64, + pub time_offset_milliseconds: f64, +} + +#[derive(Clone, Debug)] +pub struct NetworkStorage { + pub data_points: BTreeMap>)>, pub rx: u64, pub tx: u64, pub total_rx: u64, pub total_tx: u64, - pub instant: Instant, + pub last_collection_time: Instant, +} + +impl Default for NetworkStorage { + fn default() -> Self { + NetworkStorage { + data_points: BTreeMap::default(), + rx: 0, + tx: 0, + total_rx: 0, + total_tx: 0, + last_collection_time: Instant::now(), + } + } +} + +#[derive(Clone, Debug)] +/// Note all values are in bytes... 
+pub struct NetworkData { + pub rx: u64, + pub tx: u64, } pub async fn get_network_data( - sys: &System, prev_net_rx_bytes: &mut u64, prev_net_tx_bytes: &mut u64, - prev_net_access_time: &mut Instant, curr_time: &Instant, -) -> crate::utils::error::Result { + sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64, + curr_time: &Instant, +) -> NetworkData { + // FIXME: [WIN] Track current total bytes... also is this accurate? if cfg!(target_os = "windows") { let network_data = sys.get_network(); - - *prev_net_access_time = *curr_time; - Ok(NetworkData { + NetworkData { rx: network_data.get_income(), tx: network_data.get_outcome(), - total_rx: 0, - total_tx: 0, - instant: *prev_net_access_time, - }) + } } else { let mut io_data = net::io_counters(); let mut net_rx: u64 = 0; @@ -40,21 +64,23 @@ pub async fn get_network_data( net_tx += io.bytes_sent().get::(); } } - let cur_time = Instant::now(); - let elapsed_time = cur_time.duration_since(*prev_net_access_time).as_secs_f64(); + let elapsed_time = curr_time + .duration_since(*prev_net_access_time) + .as_secs_f64(); - let rx = ((net_rx - *prev_net_rx_bytes) as f64 / elapsed_time) as u64; - let tx = ((net_tx - *prev_net_tx_bytes) as f64 / elapsed_time) as u64; + if *prev_net_rx == 0 { + *prev_net_rx = net_rx; + } - *prev_net_rx_bytes = net_rx; - *prev_net_tx_bytes = net_tx; - *prev_net_access_time = cur_time; - Ok(NetworkData { - rx, - tx, - total_rx: *prev_net_rx_bytes, - total_tx: *prev_net_tx_bytes, - instant: *prev_net_access_time, - }) + if *prev_net_tx == 0 { + *prev_net_tx = net_tx; + } + + let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64; + let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64; + + *prev_net_rx = net_rx; + *prev_net_tx = net_tx; + NetworkData { rx, tx } } } diff --git a/src/data_conversion.rs b/src/data_conversion.rs index 5def8bf0..58322d97 100644 --- a/src/data_conversion.rs +++ b/src/data_conversion.rs @@ -399,64 +399,60 @@ pub fn 
update_network_data_points(app_data: &data_collection::Data) -> Converted } pub fn convert_network_data_points( - network_data: &[data_collection::network::NetworkData], + network_data: &data_collection::network::NetworkStorage, ) -> ConvertedNetworkData { let mut rx: Vec<(f64, f64)> = Vec::new(); let mut tx: Vec<(f64, f64)> = Vec::new(); - for data in network_data { - let current_time = std::time::Instant::now(); + let current_time = network_data.last_collection_time; + for (time, data) in &network_data.data_points { + let time_from_start: f64 = ((TIME_STARTS_FROM as f64 + - current_time.duration_since(*time).as_millis() as f64) + * 10_f64) + .floor(); + + // Insert in joiner points + if let Some(joiners) = &data.1 { + for joiner in joiners { + let offset_time = time_from_start - joiner.time_offset_milliseconds as f64 * 10_f64; + rx.push(( + offset_time, + if joiner.rx > 0.0 { + (joiner.rx).log(2.0) + } else { + 0.0 + }, + )); + + tx.push(( + offset_time, + if joiner.tx > 0.0 { + (joiner.tx).log(2.0) + } else { + 0.0 + }, + )); + } + } + + // Insert in main points let rx_data = ( - ((TIME_STARTS_FROM as f64 - - current_time.duration_since(data.instant).as_millis() as f64) - * 10_f64) - .floor(), - if data.rx > 0 { - (data.rx as f64).log(2.0) + time_from_start, + if data.0.rx > 0 { + (data.0.rx as f64).log(2.0) } else { 0.0 }, ); let tx_data = ( - ((TIME_STARTS_FROM as f64 - - current_time.duration_since(data.instant).as_millis() as f64) - * 10_f64) - .floor(), - if data.tx > 0 { - (data.tx as f64).log(2.0) + time_from_start, + if data.0.tx > 0 { + (data.0.tx as f64).log(2.0) } else { 0.0 }, ); - //debug!("Plotting: {:?} bytes rx, {:?} bytes tx", rx_data, tx_data); - - // Now, inject our joining points... 
- if !rx.is_empty() { - let previous_element_data = *(rx.last().unwrap()); - for idx in 0..50 { - rx.push(( - previous_element_data.0 - + ((rx_data.0 - previous_element_data.0) / 50.0 * f64::from(idx)), - previous_element_data.1 - + ((rx_data.1 - previous_element_data.1) / 50.0 * f64::from(idx)), - )); - } - } - - // Now, inject our joining points... - if !tx.is_empty() { - let previous_element_data = *(tx.last().unwrap()); - for idx in 0..50 { - tx.push(( - previous_element_data.0 - + ((tx_data.0 - previous_element_data.0) / 50.0 * f64::from(idx)), - previous_element_data.1 - + ((tx_data.1 - previous_element_data.1) / 50.0 * f64::from(idx)), - )); - } - } - rx.push(rx_data); tx.push(tx_data); } @@ -466,13 +462,8 @@ pub fn convert_network_data_points( let total_tx_converted_result: (f64, String); let tx_converted_result: (f64, String); - if let Some(last_num_bytes_entry) = network_data.last() { - rx_converted_result = get_exact_byte_values(last_num_bytes_entry.rx, false); - total_rx_converted_result = get_exact_byte_values(last_num_bytes_entry.total_rx, false) - } else { - rx_converted_result = get_exact_byte_values(0, false); - total_rx_converted_result = get_exact_byte_values(0, false); - } + rx_converted_result = get_exact_byte_values(network_data.rx, false); + total_rx_converted_result = get_exact_byte_values(network_data.total_rx, false); let rx_display = format!("{:.*}{}", 1, rx_converted_result.0, rx_converted_result.1); let total_rx_display = if cfg!(not(target_os = "windows")) { format!( @@ -483,13 +474,8 @@ pub fn convert_network_data_points( "N/A".to_string() }; - if let Some(last_num_bytes_entry) = network_data.last() { - tx_converted_result = get_exact_byte_values(last_num_bytes_entry.tx, false); - total_tx_converted_result = get_exact_byte_values(last_num_bytes_entry.total_tx, false); - } else { - tx_converted_result = get_exact_byte_values(0, false); - total_tx_converted_result = get_exact_byte_values(0, false); - } + tx_converted_result = 
get_exact_byte_values(network_data.tx, false); + total_tx_converted_result = get_exact_byte_values(network_data.total_tx, false); let tx_display = format!("{:.*}{}", 1, tx_converted_result.0, tx_converted_result.1); let total_tx_display = if cfg!(not(target_os = "windows")) { format!( diff --git a/src/main.rs b/src/main.rs index 47b55fb6..043ddcf5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -199,7 +199,7 @@ fn main() -> error::Result<()> { } futures::executor::block_on(data_state.update_data()); tx.send(Event::Update(Box::from(data_state.data.clone()))) - .unwrap(); + .unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it if first_run { // Fix for if you set a really long time for update periods (and just gives a faster first value) From 0fdab76cf5df3c874e27ab7d89cb6f16865023c1 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 21 Jan 2020 22:59:42 -0500 Subject: [PATCH 07/26] Tweaked network graph generation a bit to match master --- src/app/data_collection.rs | 11 +++++++---- src/canvas.rs | 6 +++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs index 5042c6d6..8d3c4dbe 100644 --- a/src/app/data_collection.rs +++ b/src/app/data_collection.rs @@ -119,11 +119,14 @@ impl DataState { let mut new_joining_points = Vec::new(); - for idx in 0..100 { + let num_points = 50; + for idx in (0..num_points).rev() { new_joining_points.push(network::NetworkJoinPoint { - rx: prev_data.0.rx as f64 + rx_diff / 100.0 * idx as f64, - tx: prev_data.0.tx as f64 + tx_diff / 100.0 * idx as f64, - time_offset_milliseconds: time_gap / 100.0 * (100 - idx) as f64, + rx: prev_data.0.rx as f64 + + rx_diff / num_points as f64 * (num_points - idx) as f64, + tx: prev_data.0.tx as f64 + + tx_diff / num_points as f64 * (num_points - idx) as f64, + time_offset_milliseconds: time_gap / num_points as f64 * idx as f64, }); } Some(new_joining_points) diff --git a/src/canvas.rs 
b/src/canvas.rs index 4d087b9e..3734573e 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -131,11 +131,11 @@ fn gen_n_colours(num_to_gen: i32) -> Vec { // Generate colours let mut colour_vec: Vec = vec![ - Color::LightCyan, - Color::LightYellow, Color::Red, - Color::Green, + Color::LightYellow, Color::LightMagenta, + Color::LightCyan, + Color::Green, ]; let mut h: f32 = 0.4; // We don't need random colours... right? From e6b6048afb3ddfa89d308742f0be1eb5478ecf5c Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 21 Jan 2020 23:10:32 -0500 Subject: [PATCH 08/26] Further tweaking of network --- src/app/data_collection.rs | 19 +++++++++++++++++-- src/app/data_collection/network.rs | 19 ++++++++++++------- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs index 8d3c4dbe..0d07e084 100644 --- a/src/app/data_collection.rs +++ b/src/app/data_collection.rs @@ -37,6 +37,22 @@ pub struct Data { pub list_of_disks: Vec, } +impl Data { + pub fn first_run_cleanup(&mut self) { + self.list_of_cpu_packages = Vec::new(); + self.list_of_io = Vec::new(); + self.list_of_physical_io = Vec::new(); + self.memory = Vec::new(); + self.swap = Vec::new(); + self.list_of_temperature_sensor = Vec::new(); + self.list_of_processes = Vec::new(); + self.grouped_list_of_processes = None; + self.list_of_disks = Vec::new(); + + self.network.first_run(); + } +} + pub struct DataState { pub data: Data, first_run: bool, @@ -110,7 +126,6 @@ impl DataState { .get(&self.data.network.last_collection_time) { // If not empty, inject joining points - let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64; let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64; let time_gap = current_instant @@ -188,7 +203,7 @@ impl DataState { ); if self.first_run { - self.data = Data::default(); + self.data.first_run_cleanup(); self.first_run = false; } diff --git a/src/app/data_collection/network.rs b/src/app/data_collection/network.rs 
index c9b97de6..da646ad2 100644 --- a/src/app/data_collection/network.rs +++ b/src/app/data_collection/network.rs @@ -35,6 +35,14 @@ impl Default for NetworkStorage { } } +impl NetworkStorage { + pub fn first_run(&mut self) { + self.data_points = BTreeMap::default(); + self.rx = 0; + self.tx = 0; + } +} + #[derive(Clone, Debug)] /// Note all values are in bytes... pub struct NetworkData { @@ -68,13 +76,10 @@ pub async fn get_network_data( .duration_since(*prev_net_access_time) .as_secs_f64(); - if *prev_net_rx == 0 { - *prev_net_rx = net_rx; - } - - if *prev_net_tx == 0 { - *prev_net_tx = net_tx; - } + debug!( + "net rx: {}, net tx: {}, net prev rx: {}, net prev tx: {}", + net_rx, net_tx, *prev_net_rx, *prev_net_tx + ); let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64; let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64; From fe99b99d0af3435636562dc4ca80a944e74ac926 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 21 Jan 2020 23:35:16 -0500 Subject: [PATCH 09/26] Removed btreemap and went back to vec as it makes more sense for us --- src/app/data_collection.rs | 10 +++------- src/app/data_collection/network.rs | 13 ++++--------- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/src/app/data_collection.rs b/src/app/data_collection.rs index 0d07e084..433f43d6 100644 --- a/src/app/data_collection.rs +++ b/src/app/data_collection.rs @@ -119,13 +119,9 @@ impl DataState { let joining_points: Option> = if !self.data.network.data_points.is_empty() { - if let Some(prev_data) = self - .data - .network - .data_points - .get(&self.data.network.last_collection_time) - { + if let Some(last_entry) = self.data.network.data_points.last() { // If not empty, inject joining points + let prev_data = &last_entry.1; let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64; let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64; let time_gap = current_instant @@ -161,7 +157,7 @@ impl DataState { self.data .network 
.data_points - .insert(current_instant, (new_network_data, joining_points)); + .push((current_instant, (new_network_data, joining_points))); // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! push_if_valid( diff --git a/src/app/data_collection/network.rs b/src/app/data_collection/network.rs index da646ad2..b1695409 100644 --- a/src/app/data_collection/network.rs +++ b/src/app/data_collection/network.rs @@ -1,7 +1,6 @@ use futures::StreamExt; use heim::net; use heim::units::information::byte; -use std::collections::BTreeMap; use std::time::Instant; use sysinfo::{NetworkExt, System, SystemExt}; @@ -12,9 +11,10 @@ pub struct NetworkJoinPoint { pub time_offset_milliseconds: f64, } +type NetworkDataGroup = (Instant, (NetworkData, Option>)); #[derive(Clone, Debug)] pub struct NetworkStorage { - pub data_points: BTreeMap>)>, + pub data_points: Vec, pub rx: u64, pub tx: u64, pub total_rx: u64, @@ -25,7 +25,7 @@ pub struct NetworkStorage { impl Default for NetworkStorage { fn default() -> Self { NetworkStorage { - data_points: BTreeMap::default(), + data_points: Vec::default(), rx: 0, tx: 0, total_rx: 0, @@ -37,7 +37,7 @@ impl Default for NetworkStorage { impl NetworkStorage { pub fn first_run(&mut self) { - self.data_points = BTreeMap::default(); + self.data_points = Vec::default(); self.rx = 0; self.tx = 0; } @@ -76,11 +76,6 @@ pub async fn get_network_data( .duration_since(*prev_net_access_time) .as_secs_f64(); - debug!( - "net rx: {}, net tx: {}, net prev rx: {}, net prev tx: {}", - net_rx, net_tx, *prev_net_rx, *prev_net_tx - ); - let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64; let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64; From 13f6dfc529bf6702201c67aabec1d637f368e8c5 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sat, 25 Jan 2020 16:36:14 -0500 Subject: [PATCH 10/26] Reworked network again; will use this to change all widgets --- src/app.rs | 32 ++++- 
src/app/data_collection/network.rs | 86 ------------- .../{data_collection.rs => data_harvester.rs} | 79 ++++-------- .../cpu.rs | 0 .../disks.rs | 0 .../mem.rs | 0 src/app/data_harvester/network.rs | 55 ++++++++ .../processes.rs | 0 .../temperature.rs | 0 src/app/data_janitor.rs | 120 ++++++++++++++++++ src/canvas.rs | 5 +- src/data_conversion.rs | 120 ++++++++---------- src/main.rs | 43 ++++--- 13 files changed, 300 insertions(+), 240 deletions(-) delete mode 100644 src/app/data_collection/network.rs rename src/app/{data_collection.rs => data_harvester.rs} (74%) rename src/app/{data_collection => data_harvester}/cpu.rs (100%) rename src/app/{data_collection => data_harvester}/disks.rs (100%) rename src/app/{data_collection => data_harvester}/mem.rs (100%) create mode 100644 src/app/data_harvester/network.rs rename src/app/{data_collection => data_harvester}/processes.rs (100%) rename src/app/{data_collection => data_harvester}/temperature.rs (100%) create mode 100644 src/app/data_janitor.rs diff --git a/src/app.rs b/src/app.rs index 244f3872..c6c13691 100644 --- a/src/app.rs +++ b/src/app.rs @@ -1,7 +1,10 @@ -pub mod data_collection; -use data_collection::{processes, temperature}; +pub mod data_harvester; +use data_harvester::{processes, temperature}; use std::time::Instant; +pub mod data_janitor; +use data_janitor::*; + use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::error::Result}; mod process_killer; @@ -30,6 +33,23 @@ lazy_static! { regex::Regex::new(".*"); } +/// AppConfigFields is meant to cover basic fields that would normally be set +/// by config files or launch options. Don't need to be mutable (set and forget). +pub struct AppConfigFields { + pub update_rate_in_milliseconds: u64, + pub temperature_type: temperature::TemperatureType, + pub use_dot: bool, +} + +/// AppScrollWidgetState deals with fields for a scrollable app's current state. 
+pub struct AppScrollWidgetState { + pub widget_scroll_position: i64, +} + +/// AppSearchState only deals with the search's state. +pub struct AppSearchState {} + +// TODO: [OPT] Group like fields together... this is kinda gross to step through pub struct App { // Sorting pub process_sorting_type: processes::ProcessSorting, @@ -49,7 +69,7 @@ pub struct App { pub update_rate_in_milliseconds: u64, pub show_average_cpu: bool, pub current_application_position: ApplicationPosition, - pub data: data_collection::Data, + pub data: data_harvester::Data, awaiting_second_char: bool, second_char: char, pub use_dot: bool, @@ -63,12 +83,13 @@ pub struct App { last_key_press: Instant, pub canvas_data: canvas::CanvasData, enable_grouping: bool, - enable_searching: bool, // TODO: [OPT] group together? + enable_searching: bool, current_search_query: String, searching_pid: bool, pub use_simple: bool, current_regex: std::result::Result, current_cursor_position: usize, + pub data_collection: DataCollection, } impl App { @@ -94,7 +115,7 @@ impl App { previous_disk_position: 0, previous_temp_position: 0, previous_cpu_table_position: 0, - data: data_collection::Data::default(), + data: data_harvester::Data::default(), awaiting_second_char: false, second_char: ' ', use_dot, @@ -114,6 +135,7 @@ impl App { use_simple: false, current_regex: BASE_REGEX.clone(), //TODO: [OPT] seems like a thing we can switch to lifetimes to avoid cloning current_cursor_position: 0, + data_collection: DataCollection::default(), } } diff --git a/src/app/data_collection/network.rs b/src/app/data_collection/network.rs deleted file mode 100644 index b1695409..00000000 --- a/src/app/data_collection/network.rs +++ /dev/null @@ -1,86 +0,0 @@ -use futures::StreamExt; -use heim::net; -use heim::units::information::byte; -use std::time::Instant; -use sysinfo::{NetworkExt, System, SystemExt}; - -#[derive(Clone, Debug)] -pub struct NetworkJoinPoint { - pub rx: f64, - pub tx: f64, - pub time_offset_milliseconds: f64, -} - 
-type NetworkDataGroup = (Instant, (NetworkData, Option>)); -#[derive(Clone, Debug)] -pub struct NetworkStorage { - pub data_points: Vec, - pub rx: u64, - pub tx: u64, - pub total_rx: u64, - pub total_tx: u64, - pub last_collection_time: Instant, -} - -impl Default for NetworkStorage { - fn default() -> Self { - NetworkStorage { - data_points: Vec::default(), - rx: 0, - tx: 0, - total_rx: 0, - total_tx: 0, - last_collection_time: Instant::now(), - } - } -} - -impl NetworkStorage { - pub fn first_run(&mut self) { - self.data_points = Vec::default(); - self.rx = 0; - self.tx = 0; - } -} - -#[derive(Clone, Debug)] -/// Note all values are in bytes... -pub struct NetworkData { - pub rx: u64, - pub tx: u64, -} - -pub async fn get_network_data( - sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64, - curr_time: &Instant, -) -> NetworkData { - // FIXME: [WIN] Track current total bytes... also is this accurate? - if cfg!(target_os = "windows") { - let network_data = sys.get_network(); - NetworkData { - rx: network_data.get_income(), - tx: network_data.get_outcome(), - } - } else { - let mut io_data = net::io_counters(); - let mut net_rx: u64 = 0; - let mut net_tx: u64 = 0; - - while let Some(io) = io_data.next().await { - if let Ok(io) = io { - net_rx += io.bytes_recv().get::(); - net_tx += io.bytes_sent().get::(); - } - } - let elapsed_time = curr_time - .duration_since(*prev_net_access_time) - .as_secs_f64(); - - let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64; - let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64; - - *prev_net_rx = net_rx; - *prev_net_tx = net_tx; - NetworkData { rx, tx } - } -} diff --git a/src/app/data_collection.rs b/src/app/data_harvester.rs similarity index 74% rename from src/app/data_collection.rs rename to src/app/data_harvester.rs index 433f43d6..c31f4962 100644 --- a/src/app/data_collection.rs +++ b/src/app/data_harvester.rs @@ -23,39 +23,52 @@ fn push_if_valid(result: 
&Result, vector_to_push: &mut } } -#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug)] pub struct Data { pub list_of_cpu_packages: Vec, pub list_of_io: Vec, - pub list_of_physical_io: Vec, pub memory: Vec, pub swap: Vec, pub list_of_temperature_sensor: Vec, - pub network: network::NetworkStorage, + pub network: network::NetworkHarvest, pub list_of_processes: Vec, pub grouped_list_of_processes: Option>, pub list_of_disks: Vec, + pub last_collection_time: Instant, +} + +impl Default for Data { + fn default() -> Self { + Data { + list_of_cpu_packages: Vec::default(), + list_of_io: Vec::default(), + memory: Vec::default(), + swap: Vec::default(), + list_of_temperature_sensor: Vec::default(), + list_of_processes: Vec::default(), + grouped_list_of_processes: None, + list_of_disks: Vec::default(), + network: network::NetworkHarvest::default(), + last_collection_time: Instant::now(), + } + } } impl Data { pub fn first_run_cleanup(&mut self) { self.list_of_cpu_packages = Vec::new(); self.list_of_io = Vec::new(); - self.list_of_physical_io = Vec::new(); self.memory = Vec::new(); self.swap = Vec::new(); self.list_of_temperature_sensor = Vec::new(); self.list_of_processes = Vec::new(); self.grouped_list_of_processes = None; self.list_of_disks = Vec::new(); - - self.network.first_run(); } } pub struct DataState { pub data: Data, - first_run: bool, sys: System, stale_max_seconds: u64, prev_pid_stats: HashMap, @@ -70,7 +83,6 @@ impl Default for DataState { fn default() -> Self { DataState { data: Data::default(), - first_run: true, sys: System::new(), stale_max_seconds: constants::STALE_MAX_MILLISECONDS / 1000, prev_pid_stats: HashMap::new(), @@ -108,57 +120,15 @@ impl DataState { let current_instant = std::time::Instant::now(); // Network - let new_network_data = network::get_network_data( + self.data.network = network::get_network_data( &self.sys, - &self.data.network.last_collection_time, + &self.data.last_collection_time, &mut self.data.network.total_rx, &mut 
self.data.network.total_tx, ¤t_instant, ) .await; - let joining_points: Option> = - if !self.data.network.data_points.is_empty() { - if let Some(last_entry) = self.data.network.data_points.last() { - // If not empty, inject joining points - let prev_data = &last_entry.1; - let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64; - let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64; - let time_gap = current_instant - .duration_since(self.data.network.last_collection_time) - .as_millis() as f64; - - let mut new_joining_points = Vec::new(); - - let num_points = 50; - for idx in (0..num_points).rev() { - new_joining_points.push(network::NetworkJoinPoint { - rx: prev_data.0.rx as f64 - + rx_diff / num_points as f64 * (num_points - idx) as f64, - tx: prev_data.0.tx as f64 - + tx_diff / num_points as f64 * (num_points - idx) as f64, - time_offset_milliseconds: time_gap / num_points as f64 * idx as f64, - }); - } - Some(new_joining_points) - } else { - None - } - } else { - None - }; - - // Set values - self.data.network.rx = new_network_data.rx; - self.data.network.tx = new_network_data.tx; - self.data.network.last_collection_time = current_instant; - - // Add new point - self.data - .network - .data_points - .push((current_instant, (new_network_data, joining_points))); - // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! 
push_if_valid( &cpu::get_cpu_data_list(&self.sys, ¤t_instant), @@ -198,10 +168,7 @@ impl DataState { &mut self.data.list_of_temperature_sensor, ); - if self.first_run { - self.data.first_run_cleanup(); - self.first_run = false; - } + self.data.last_collection_time = current_instant; // Filter out stale timed entries let clean_instant = Instant::now(); diff --git a/src/app/data_collection/cpu.rs b/src/app/data_harvester/cpu.rs similarity index 100% rename from src/app/data_collection/cpu.rs rename to src/app/data_harvester/cpu.rs diff --git a/src/app/data_collection/disks.rs b/src/app/data_harvester/disks.rs similarity index 100% rename from src/app/data_collection/disks.rs rename to src/app/data_harvester/disks.rs diff --git a/src/app/data_collection/mem.rs b/src/app/data_harvester/mem.rs similarity index 100% rename from src/app/data_collection/mem.rs rename to src/app/data_harvester/mem.rs diff --git a/src/app/data_harvester/network.rs b/src/app/data_harvester/network.rs new file mode 100644 index 00000000..6be12590 --- /dev/null +++ b/src/app/data_harvester/network.rs @@ -0,0 +1,55 @@ +use futures::StreamExt; +use heim::net; +use heim::units::information::byte; +use std::time::Instant; +use sysinfo::{NetworkExt, System, SystemExt}; + +#[derive(Default, Clone, Debug)] +pub struct NetworkHarvest { + pub rx: u64, + pub tx: u64, + pub total_rx: u64, + pub total_tx: u64, +} + +pub async fn get_network_data( + sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64, + curr_time: &Instant, +) -> NetworkHarvest { + // FIXME: [WIN] Track current total bytes... also is this accurate? 
+ if cfg!(target_os = "windows") { + let network_data = sys.get_network(); + NetworkHarvest { + rx: network_data.get_income(), + tx: network_data.get_outcome(), + total_rx: 0, + total_tx: 0, + } + } else { + let mut io_data = net::io_counters(); + let mut total_rx: u64 = 0; + let mut total_tx: u64 = 0; + + while let Some(io) = io_data.next().await { + if let Ok(io) = io { + total_rx += io.bytes_recv().get::(); + total_tx += io.bytes_sent().get::(); + } + } + let elapsed_time = curr_time + .duration_since(*prev_net_access_time) + .as_secs_f64(); + + let rx = ((total_rx - *prev_net_rx) as f64 / elapsed_time) as u64; + let tx = ((total_tx - *prev_net_tx) as f64 / elapsed_time) as u64; + + *prev_net_rx = total_rx; + *prev_net_tx = total_tx; + NetworkHarvest { + rx, + tx, + total_rx, + total_tx, + } + } +} diff --git a/src/app/data_collection/processes.rs b/src/app/data_harvester/processes.rs similarity index 100% rename from src/app/data_collection/processes.rs rename to src/app/data_harvester/processes.rs diff --git a/src/app/data_collection/temperature.rs b/src/app/data_harvester/temperature.rs similarity index 100% rename from src/app/data_collection/temperature.rs rename to src/app/data_harvester/temperature.rs diff --git a/src/app/data_janitor.rs b/src/app/data_janitor.rs new file mode 100644 index 00000000..37b6a714 --- /dev/null +++ b/src/app/data_janitor.rs @@ -0,0 +1,120 @@ +use crate::{data_harvester::network, data_harvester::Data}; +/// In charge of cleaning and managing data. I couldn't think of a better +/// name for the file. 
+use std::time::Instant; +use std::vec::Vec; + +pub type TimeOffset = f64; +pub type Value = f64; +pub type JoinedDataPoints = (Value, Vec<(TimeOffset, Value)>); + +#[derive(Debug, Default)] +pub struct TimedData { + pub rx_data: JoinedDataPoints, + pub tx_data: JoinedDataPoints, + pub cpu_data: JoinedDataPoints, + pub mem_data: JoinedDataPoints, + pub swap_data: JoinedDataPoints, +} + +/// AppCollection represents the pooled data stored within the main app +/// thread. Basically stores a (occasionally cleaned) record of the data +/// collected, and what is needed to convert into a displayable form. +/// +/// If the app is *frozen* - that is, we do not want to *display* any changing +/// data, keep updating this, don't convert to canvas displayable data! +/// +/// Note that with this method, the *app* thread is responsible for cleaning - +/// not the data collector. +#[derive(Debug)] +pub struct DataCollection { + pub current_instant: Instant, + pub timed_data_vec: Vec<(Instant, TimedData)>, + pub network_harvest: network::NetworkHarvest, + // pub process_data: ProcessData, + // pub disk_data: DiskData, + // pub temp_data: TempData, +} + +impl Default for DataCollection { + fn default() -> Self { + DataCollection { + current_instant: Instant::now(), + timed_data_vec: Vec::default(), + network_harvest: network::NetworkHarvest::default(), + // process_data: ProcessData::default(), + } + } +} + +impl DataCollection { + pub fn clean_data(&mut self) {} + + pub fn eat_data(&mut self, harvested_data: &Data) { + let harvested_time = harvested_data.last_collection_time; + let mut new_entry = TimedData::default(); + + // RX + let rx_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() { + generate_joining_points( + &time, + last_pt.rx_data.0, + &harvested_time, + harvested_data.network.rx as f64, + ) + } else { + Vec::new() + }; + let rx_pt = (harvested_data.network.rx as f64, rx_joining_pts); + new_entry.rx_data = rx_pt; + + // TX + let tx_joining_pts = 
if let Some((time, last_pt)) = self.timed_data_vec.last() { + generate_joining_points( + &time, + last_pt.tx_data.0, + &harvested_time, + harvested_data.network.tx as f64, + ) + } else { + Vec::new() + }; + let tx_pt = (harvested_data.network.tx as f64, tx_joining_pts); + new_entry.tx_data = tx_pt; + + // Copy over data + self.network_harvest = harvested_data.network.clone(); + + // And we're done eating. + self.current_instant = harvested_time; + self.timed_data_vec.push((harvested_time, new_entry)); + } +} + +pub fn generate_joining_points( + start_x: &Instant, start_y: f64, end_x: &Instant, end_y: f64, +) -> Vec<(TimeOffset, Value)> { + let mut points: Vec<(TimeOffset, Value)> = Vec::new(); + + // Convert time floats first: + let time_difference = (*end_x).duration_since(*start_x).as_millis() as f64; + let value_difference = end_y - start_y; + + // Let's generate... about this many points! + let num_points = std::cmp::min( + std::cmp::max( + (value_difference.abs() / (time_difference + 0.0001) * 1000.0) as u64, + 100, + ), + 1000, + ); + + for itx in 0..num_points { + points.push(( + time_difference - (itx as f64 / num_points as f64 * time_difference), + start_y + (itx as f64 / num_points as f64 * value_difference), + )); + } + + points +} diff --git a/src/canvas.rs b/src/canvas.rs index 3734573e..578241a9 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -334,7 +334,6 @@ pub fn draw_data( } else { 5 }; - debug!("Req: {}", required); let remaining = bottom_chunks[0].height - required; [Constraint::Length(remaining), Constraint::Length(required)] } @@ -665,7 +664,7 @@ fn draw_network_graph(f: &mut Frame, app_state: &app::Ap let x_axis: Axis = Axis::default() .style(Style::default().fg(GRAPH_COLOUR)) - .bounds([0.0, 600_000.0]); + .bounds([0.0, 60_000.0]); let y_axis = Axis::default() .style(Style::default().fg(GRAPH_COLOUR)) .bounds([-0.5, 30_f64]) @@ -1035,7 +1034,7 @@ fn draw_processes_table( ) }); - use app::data_collection::processes::ProcessSorting; + use 
app::data_harvester::processes::ProcessSorting; let mut pid_or_name = if app_state.is_grouped() { "Count" } else { diff --git a/src/data_conversion.rs b/src/data_conversion.rs index 58322d97..824f1e02 100644 --- a/src/data_conversion.rs +++ b/src/data_conversion.rs @@ -2,12 +2,14 @@ //! can actually handle. use crate::{ - app::data_collection, + app::data_harvester, + app::data_janitor, constants, utils::gen_util::{get_exact_byte_values, get_simple_byte_values}, }; use constants::*; use regex::Regex; +use std::time::Instant; #[derive(Default, Debug)] pub struct ConvertedNetworkData { @@ -55,7 +57,7 @@ impl From<&CpuPoint> for (f64, f64) { } pub fn update_temp_row( - app_data: &data_collection::Data, temp_type: &data_collection::temperature::TemperatureType, + app_data: &data_harvester::Data, temp_type: &data_harvester::temperature::TemperatureType, ) -> Vec> { let mut sensor_vector: Vec> = Vec::new(); @@ -67,9 +69,9 @@ pub fn update_temp_row( sensor.component_name.to_string(), (sensor.temperature.ceil() as u64).to_string() + match temp_type { - data_collection::temperature::TemperatureType::Celsius => "C", - data_collection::temperature::TemperatureType::Kelvin => "K", - data_collection::temperature::TemperatureType::Fahrenheit => "F", + data_harvester::temperature::TemperatureType::Celsius => "C", + data_harvester::temperature::TemperatureType::Kelvin => "K", + data_harvester::temperature::TemperatureType::Fahrenheit => "F", }, ]); } @@ -78,7 +80,7 @@ pub fn update_temp_row( sensor_vector } -pub fn update_disk_row(app_data: &data_collection::Data) -> Vec> { +pub fn update_disk_row(app_data: &data_harvester::Data) -> Vec> { let mut disk_vector: Vec> = Vec::new(); for disk in &app_data.list_of_disks { let io_activity = { @@ -141,7 +143,7 @@ pub fn update_disk_row(app_data: &data_collection::Data) -> Vec> { } pub fn simple_update_process_row( - app_data: &data_collection::Data, matching_string: &str, use_pid: bool, + app_data: &data_harvester::Data, matching_string: 
&str, use_pid: bool, ) -> (Vec, Vec) { let process_vector: Vec = app_data .list_of_processes @@ -183,7 +185,7 @@ pub fn simple_update_process_row( } pub fn regex_update_process_row( - app_data: &data_collection::Data, regex_matcher: &std::result::Result, + app_data: &data_harvester::Data, regex_matcher: &std::result::Result, use_pid: bool, ) -> (Vec, Vec) { let process_vector: Vec = app_data @@ -226,7 +228,7 @@ pub fn regex_update_process_row( } fn return_mapped_process( - process: &data_collection::processes::ProcessData, app_data: &data_collection::Data, + process: &data_harvester::processes::ProcessData, app_data: &data_harvester::Data, ) -> ConvertedProcessData { ConvertedProcessData { pid: process.pid, @@ -251,7 +253,7 @@ fn return_mapped_process( } pub fn update_cpu_data_points( - show_avg_cpu: bool, app_data: &data_collection::Data, + show_avg_cpu: bool, app_data: &data_harvester::Data, ) -> Vec { let mut cpu_data_vector: Vec = Vec::new(); let mut cpu_collection: Vec> = Vec::new(); @@ -264,7 +266,7 @@ pub fn update_cpu_data_points( let mut this_cpu_data: Vec = Vec::new(); for data in &app_data.list_of_cpu_packages { - let current_time = std::time::Instant::now(); + let current_time = Instant::now(); let current_cpu_usage = data.cpu_vec[cpu_num].cpu_usage; let new_entry = CpuPoint { @@ -329,15 +331,15 @@ pub fn update_cpu_data_points( cpu_data_vector } -pub fn update_mem_data_points(app_data: &data_collection::Data) -> Vec<(f64, f64)> { +pub fn update_mem_data_points(app_data: &data_harvester::Data) -> Vec<(f64, f64)> { convert_mem_data(&app_data.memory) } -pub fn update_swap_data_points(app_data: &data_collection::Data) -> Vec<(f64, f64)> { +pub fn update_swap_data_points(app_data: &data_harvester::Data) -> Vec<(f64, f64)> { convert_mem_data(&app_data.swap) } -pub fn update_mem_data_values(app_data: &data_collection::Data) -> Vec<(u64, u64)> { +pub fn update_mem_data_values(app_data: &data_harvester::Data) -> Vec<(u64, u64)> { let mut result: Vec<(u64, u64)> 
= Vec::new(); result.push(get_most_recent_mem_values(&app_data.memory)); result.push(get_most_recent_mem_values(&app_data.swap)); @@ -345,7 +347,7 @@ pub fn update_mem_data_values(app_data: &data_collection::Data) -> Vec<(u64, u64 result } -fn get_most_recent_mem_values(mem_data: &[data_collection::mem::MemData]) -> (u64, u64) { +fn get_most_recent_mem_values(mem_data: &[data_harvester::mem::MemData]) -> (u64, u64) { let mut result: (u64, u64) = (0, 0); if !mem_data.is_empty() { @@ -358,7 +360,7 @@ fn get_most_recent_mem_values(mem_data: &[data_collection::mem::MemData]) -> (u6 result } -fn convert_mem_data(mem_data: &[data_collection::mem::MemData]) -> Vec<(f64, f64)> { +fn convert_mem_data(mem_data: &[data_harvester::mem::MemData]) -> Vec<(f64, f64)> { let mut result: Vec<(f64, f64)> = Vec::new(); for data in mem_data { @@ -394,67 +396,45 @@ fn convert_mem_data(mem_data: &[data_collection::mem::MemData]) -> Vec<(f64, f64 result } -pub fn update_network_data_points(app_data: &data_collection::Data) -> ConvertedNetworkData { - convert_network_data_points(&app_data.network) -} - pub fn convert_network_data_points( - network_data: &data_collection::network::NetworkStorage, + current_data: &data_janitor::DataCollection, ) -> ConvertedNetworkData { let mut rx: Vec<(f64, f64)> = Vec::new(); let mut tx: Vec<(f64, f64)> = Vec::new(); - let current_time = network_data.last_collection_time; - for (time, data) in &network_data.data_points { - let time_from_start: f64 = ((TIME_STARTS_FROM as f64 + let current_time = current_data.current_instant; + for (time, data) in ¤t_data.timed_data_vec { + let time_from_start: f64 = (TIME_STARTS_FROM as f64 - current_time.duration_since(*time).as_millis() as f64) - * 10_f64) .floor(); - // Insert in joiner points - if let Some(joiners) = &data.1 { - for joiner in joiners { - let offset_time = time_from_start - joiner.time_offset_milliseconds as f64 * 10_f64; - rx.push(( - offset_time, - if joiner.rx > 0.0 { - (joiner.rx).log(2.0) - } else 
{ - 0.0 - }, - )); - - tx.push(( - offset_time, - if joiner.tx > 0.0 { - (joiner.tx).log(2.0) - } else { - 0.0 - }, - )); - } + //Insert joiner points + for &(joiner_offset, joiner_val) in &data.rx_data.1 { + let offset_time = time_from_start - joiner_offset as f64; + rx.push(( + offset_time, + if joiner_val > 0.0 { + (joiner_val).log(2.0) + } else { + 0.0 + }, + )); } - // Insert in main points - let rx_data = ( - time_from_start, - if data.0.rx > 0 { - (data.0.rx as f64).log(2.0) - } else { - 0.0 - }, - ); - let tx_data = ( - time_from_start, - if data.0.tx > 0 { - (data.0.tx as f64).log(2.0) - } else { - 0.0 - }, - ); + for &(joiner_offset, joiner_val) in &data.tx_data.1 { + let offset_time = time_from_start - joiner_offset as f64; + tx.push(( + offset_time, + if joiner_val > 0.0 { + (joiner_val).log(2.0) + } else { + 0.0 + }, + )); + } - rx.push(rx_data); - tx.push(tx_data); + rx.push((time_from_start, data.rx_data.0)); + tx.push((time_from_start, data.tx_data.0)); } let total_rx_converted_result: (f64, String); @@ -462,8 +442,8 @@ pub fn convert_network_data_points( let total_tx_converted_result: (f64, String); let tx_converted_result: (f64, String); - rx_converted_result = get_exact_byte_values(network_data.rx, false); - total_rx_converted_result = get_exact_byte_values(network_data.total_rx, false); + rx_converted_result = get_exact_byte_values(current_data.network_harvest.rx, false); + total_rx_converted_result = get_exact_byte_values(current_data.network_harvest.total_rx, false); let rx_display = format!("{:.*}{}", 1, rx_converted_result.0, rx_converted_result.1); let total_rx_display = if cfg!(not(target_os = "windows")) { format!( @@ -474,8 +454,8 @@ pub fn convert_network_data_points( "N/A".to_string() }; - tx_converted_result = get_exact_byte_values(network_data.tx, false); - total_tx_converted_result = get_exact_byte_values(network_data.total_tx, false); + tx_converted_result = get_exact_byte_values(current_data.network_harvest.tx, false); + 
total_tx_converted_result = get_exact_byte_values(current_data.network_harvest.total_tx, false); let tx_display = format!("{:.*}{}", 1, tx_converted_result.0, tx_converted_result.1); let total_tx_display = if cfg!(not(target_os = "windows")) { format!( diff --git a/src/main.rs b/src/main.rs index 043ddcf5..7d2e005e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -35,8 +35,8 @@ mod canvas; mod constants; mod data_conversion; -use app::data_collection; -use app::data_collection::processes::ProcessData; +use app::data_harvester; +use app::data_harvester::processes::ProcessData; use constants::TICK_RATE_IN_MILLISECONDS; use data_conversion::*; use std::collections::BTreeMap; @@ -45,7 +45,7 @@ use utils::error::{self, BottomError}; enum Event { KeyInput(I), MouseInput(J), - Update(Box), + Update(Box), } enum ResetEvent { @@ -104,11 +104,11 @@ fn main() -> error::Result<()> { // Set other settings let temperature_type = if matches.is_present("FAHRENHEIT") { - data_collection::temperature::TemperatureType::Fahrenheit + data_harvester::temperature::TemperatureType::Fahrenheit } else if matches.is_present("KELVIN") { - data_collection::temperature::TemperatureType::Kelvin + data_harvester::temperature::TemperatureType::Kelvin } else { - data_collection::temperature::TemperatureType::Celsius + data_harvester::temperature::TemperatureType::Celsius }; let show_average_cpu = matches.is_present("AVG_CPU"); let use_dot = matches.is_present("DOT_MARKER"); @@ -183,7 +183,7 @@ fn main() -> error::Result<()> { let temp_type = app.temperature_type.clone(); thread::spawn(move || { let tx = tx.clone(); - let mut data_state = data_collection::DataState::default(); + let mut data_state = data_harvester::DataState::default(); data_state.init(); data_state.set_temperature_type(temp_type); data_state.set_use_current_cpu_total(use_current_cpu_total); @@ -193,21 +193,22 @@ fn main() -> error::Result<()> { ResetEvent::Reset => { //debug!("Received reset message"); first_run = true; - 
data_state.data = app::data_collection::Data::default(); + data_state.data = app::data_harvester::Data::default(); } } } futures::executor::block_on(data_state.update_data()); - tx.send(Event::Update(Box::from(data_state.data.clone()))) - .unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it if first_run { // Fix for if you set a really long time for update periods (and just gives a faster first value) + data_state.data.first_run_cleanup(); // TODO: [OPT] we can remove this later. thread::sleep(Duration::from_millis(250)); + futures::executor::block_on(data_state.update_data()); first_run = false; - } else { - thread::sleep(Duration::from_millis(update_rate_in_milliseconds as u64)); } + tx.send(Event::Update(Box::from(data_state.data.clone()))) + .unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it + thread::sleep(Duration::from_millis(update_rate_in_milliseconds as u64)); } }); } @@ -277,12 +278,14 @@ fn main() -> error::Result<()> { // NOTE TO SELF - data is refreshed into app state HERE! That means, if it is // frozen, then, app.data is never refreshed, until unfrozen! 
if !app.is_frozen { + app.data_collection.eat_data(&data); + app.data = *data; handle_process_sorting(&mut app); // Convert all data into tui components - let network_data = update_network_data_points(&app.data); + let network_data = convert_network_data_points(&app.data_collection); app.canvas_data.network_data_rx = network_data.rx; app.canvas_data.network_data_tx = network_data.tx; app.canvas_data.rx_display = network_data.rx_display; @@ -303,9 +306,9 @@ fn main() -> error::Result<()> { } // Quick fix for tab updating the table headers - if let data_collection::processes::ProcessSorting::PID = &app.process_sorting_type { + if let data_harvester::processes::ProcessSorting::PID = &app.process_sorting_type { if app.is_grouped() { - app.process_sorting_type = data_collection::processes::ProcessSorting::CPU; // Go back to default, negate PID for group + app.process_sorting_type = data_harvester::processes::ProcessSorting::CPU; // Go back to default, negate PID for group app.process_sorting_reverse = true; } } @@ -372,14 +375,14 @@ fn handle_process_sorting(app: &mut app::App) { ); if let Some(grouped_list_of_processes) = &mut app.data.grouped_list_of_processes { - if let data_collection::processes::ProcessSorting::PID = &app.process_sorting_type { - data_collection::processes::sort_processes( + if let data_harvester::processes::ProcessSorting::PID = &app.process_sorting_type { + data_harvester::processes::sort_processes( grouped_list_of_processes, - &data_collection::processes::ProcessSorting::CPU, // Go back to default, negate PID for group + &data_harvester::processes::ProcessSorting::CPU, // Go back to default, negate PID for group true, ); } else { - data_collection::processes::sort_processes( + data_harvester::processes::sort_processes( grouped_list_of_processes, &app.process_sorting_type, app.process_sorting_reverse, @@ -387,7 +390,7 @@ fn handle_process_sorting(app: &mut app::App) { } } - data_collection::processes::sort_processes( + 
data_harvester::processes::sort_processes( &mut app.data.list_of_processes, &app.process_sorting_type, app.process_sorting_reverse, From 10b7aa6f7880647ff3ec297f871328be6daa7019 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 26 Jan 2020 01:51:54 -0500 Subject: [PATCH 11/26] Converted mem over, will need to move the label over too --- src/app/data_harvester.rs | 60 +++++------ src/app/data_harvester/mem.rs | 23 +++-- src/app/data_harvester/network.rs | 7 ++ src/app/data_harvester/processes.rs | 27 ++--- src/app/data_janitor.rs | 62 ++++++++++-- src/canvas.rs | 3 +- src/data_conversion.rs | 149 +++++++++++++--------------- src/main.rs | 47 +++------ 8 files changed, 193 insertions(+), 185 deletions(-) diff --git a/src/app/data_harvester.rs b/src/app/data_harvester.rs index c31f4962..100c3a9c 100644 --- a/src/app/data_harvester.rs +++ b/src/app/data_harvester.rs @@ -27,8 +27,8 @@ fn push_if_valid(result: &Result, vector_to_push: &mut pub struct Data { pub list_of_cpu_packages: Vec, pub list_of_io: Vec, - pub memory: Vec, - pub swap: Vec, + pub memory: mem::MemHarvest, + pub swap: mem::MemHarvest, pub list_of_temperature_sensor: Vec, pub network: network::NetworkHarvest, pub list_of_processes: Vec, @@ -42,8 +42,8 @@ impl Default for Data { Data { list_of_cpu_packages: Vec::default(), list_of_io: Vec::default(), - memory: Vec::default(), - swap: Vec::default(), + memory: mem::MemHarvest::default(), + swap: mem::MemHarvest::default(), list_of_temperature_sensor: Vec::default(), list_of_processes: Vec::default(), grouped_list_of_processes: None, @@ -58,12 +58,14 @@ impl Data { pub fn first_run_cleanup(&mut self) { self.list_of_cpu_packages = Vec::new(); self.list_of_io = Vec::new(); - self.memory = Vec::new(); - self.swap = Vec::new(); self.list_of_temperature_sensor = Vec::new(); self.list_of_processes = Vec::new(); self.grouped_list_of_processes = None; self.list_of_disks = Vec::new(); + + self.network.first_run_cleanup(); + self.memory = 
mem::MemHarvest::default(); + self.swap = mem::MemHarvest::default(); } } @@ -74,6 +76,7 @@ pub struct DataState { prev_pid_stats: HashMap, prev_idle: f64, prev_non_idle: f64, + mem_total_kb: u64, temperature_type: temperature::TemperatureType, last_clean: Instant, // Last time stale data was cleared use_current_cpu_total: bool, @@ -88,6 +91,7 @@ impl Default for DataState { prev_pid_stats: HashMap::new(), prev_idle: 0_f64, prev_non_idle: 0_f64, + mem_total_kb: 0, temperature_type: temperature::TemperatureType::Celsius, last_clean: Instant::now(), use_current_cpu_total: false, @@ -106,6 +110,9 @@ impl DataState { pub fn init(&mut self) { self.sys.refresh_all(); + self.mem_total_kb = self.sys.get_total_memory(); + futures::executor::block_on(self.update_data()); + self.data.first_run_cleanup(); } pub async fn update_data(&mut self) { @@ -129,20 +136,20 @@ impl DataState { ) .await; + // Mem and swap + if let Ok(memory) = mem::get_mem_data_list().await { + self.data.memory = memory; + } + + if let Ok(swap) = mem::get_swap_data_list().await { + self.data.swap = swap; + } + // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! 
push_if_valid( &cpu::get_cpu_data_list(&self.sys, ¤t_instant), &mut self.data.list_of_cpu_packages, ); - - push_if_valid( - &mem::get_mem_data_list(¤t_instant).await, - &mut self.data.memory, - ); - push_if_valid( - &mem::get_swap_data_list(¤t_instant).await, - &mut self.data.swap, - ); set_if_valid( &processes::get_sorted_processes_list( &self.sys, @@ -150,6 +157,7 @@ impl DataState { &mut self.prev_non_idle, &mut self.prev_pid_stats, self.use_current_cpu_total, + self.mem_total_kb, ¤t_instant, ), &mut self.data.list_of_processes, @@ -185,8 +193,6 @@ impl DataState { self.prev_pid_stats.remove(&stale); } - // TODO: [OPT] cleaning stale network - self.data.list_of_cpu_packages = self .data .list_of_cpu_packages @@ -197,26 +203,6 @@ impl DataState { }) .collect::>(); - self.data.memory = self - .data - .memory - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - - self.data.swap = self - .data - .swap - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - self.data.list_of_io = self .data .list_of_io diff --git a/src/app/data_harvester/mem.rs b/src/app/data_harvester/mem.rs index 15d9c41f..4bd3a0db 100644 --- a/src/app/data_harvester/mem.rs +++ b/src/app/data_harvester/mem.rs @@ -1,30 +1,35 @@ use heim::units::information; -use std::time::Instant; #[derive(Debug, Clone)] -pub struct MemData { +pub struct MemHarvest { pub mem_total_in_mb: u64, pub mem_used_in_mb: u64, - pub instant: Instant, } -pub async fn get_mem_data_list(curr_time: &Instant) -> crate::utils::error::Result { +impl Default for MemHarvest { + fn default() -> Self { + MemHarvest { + mem_total_in_mb: 0, + mem_used_in_mb: 0, + } + } +} + +pub async fn get_mem_data_list() -> crate::utils::error::Result { let memory = heim::memory::memory().await?; - Ok(MemData { + Ok(MemHarvest { mem_total_in_mb: 
memory.total().get::(), mem_used_in_mb: memory.total().get::() - memory.available().get::(), - instant: *curr_time, }) } -pub async fn get_swap_data_list(curr_time: &Instant) -> crate::utils::error::Result { +pub async fn get_swap_data_list() -> crate::utils::error::Result { let memory = heim::memory::swap().await?; - Ok(MemData { + Ok(MemHarvest { mem_total_in_mb: memory.total().get::(), mem_used_in_mb: memory.used().get::(), - instant: *curr_time, }) } diff --git a/src/app/data_harvester/network.rs b/src/app/data_harvester/network.rs index 6be12590..a2aea768 100644 --- a/src/app/data_harvester/network.rs +++ b/src/app/data_harvester/network.rs @@ -12,6 +12,13 @@ pub struct NetworkHarvest { pub total_tx: u64, } +impl NetworkHarvest { + pub fn first_run_cleanup(&mut self) { + self.rx = 0; + self.tx = 0; + } +} + pub async fn get_network_data( sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64, curr_time: &Instant, diff --git a/src/app/data_harvester/processes.rs b/src/app/data_harvester/processes.rs index 069c1b29..25d3a6d4 100644 --- a/src/app/data_harvester/processes.rs +++ b/src/app/data_harvester/processes.rs @@ -21,8 +21,7 @@ impl Default for ProcessSorting { pub struct ProcessData { pub pid: u32, pub cpu_usage_percent: f64, - pub mem_usage_percent: Option, - pub mem_usage_kb: Option, + pub mem_usage_percent: f64, pub name: String, pub pid_vec: Option>, } @@ -188,9 +187,8 @@ fn convert_ps( return Ok(ProcessData { pid: 0, name: "".to_string(), - mem_usage_percent: None, - mem_usage_kb: None, - cpu_usage_percent: 0_f64, + mem_usage_percent: 0.0, + cpu_usage_percent: 0.0, pid_vec: None, }); } @@ -201,19 +199,16 @@ fn convert_ps( .parse::() .unwrap_or(0); let name = (&process[11..61]).trim().to_string(); - let mem_usage_percent = Some( - (&process[62..]) - .trim() - .to_string() - .parse::() - .unwrap_or(0_f64), - ); + let mem_usage_percent = (&process[62..]) + .trim() + .to_string() + .parse::() + .unwrap_or(0_f64); 
Ok(ProcessData { pid, name, mem_usage_percent, - mem_usage_kb: None, cpu_usage_percent: linux_cpu_usage( pid, cpu_usage, @@ -229,7 +224,7 @@ fn convert_ps( pub fn get_sorted_processes_list( sys: &System, prev_idle: &mut f64, prev_non_idle: &mut f64, prev_pid_stats: &mut std::collections::HashMap, - use_current_cpu_total: bool, curr_time: &Instant, + use_current_cpu_total: bool, mem_total_kb: u64, curr_time: &Instant, ) -> crate::utils::error::Result> { let mut process_vector: Vec = Vec::new(); @@ -241,7 +236,6 @@ pub fn get_sorted_processes_list( .output()?; let ps_stdout = String::from_utf8_lossy(&ps_result.stdout); let split_string = ps_stdout.split('\n'); - //debug!("{:?}", split_string); let cpu_calc = cpu_usage_calculation(prev_idle, prev_non_idle); if let Ok((cpu_usage, cpu_percentage)) = cpu_calc { let process_stream = split_string.collect::>(); @@ -291,8 +285,7 @@ pub fn get_sorted_processes_list( process_vector.push(ProcessData { pid: process_val.pid() as u32, name, - mem_usage_percent: None, - mem_usage_kb: Some(process_val.memory()), + mem_usage_percent: process_val.memory() as f64 * 100.0 / mem_total_kb as f64, cpu_usage_percent: f64::from(process_val.cpu_usage()), pid_vec: None, }); diff --git a/src/app/data_janitor.rs b/src/app/data_janitor.rs index 37b6a714..e87ec947 100644 --- a/src/app/data_janitor.rs +++ b/src/app/data_janitor.rs @@ -1,4 +1,4 @@ -use crate::{data_harvester::network, data_harvester::Data}; +use crate::data_harvester::{mem, network, Data}; /// In charge of cleaning and managing data. I couldn't think of a better /// name for the file. 
use std::time::Instant; @@ -15,6 +15,8 @@ pub struct TimedData { pub cpu_data: JoinedDataPoints, pub mem_data: JoinedDataPoints, pub swap_data: JoinedDataPoints, + pub temp_data: JoinedDataPoints, + pub io_data: JoinedDataPoints, } /// AppCollection represents the pooled data stored within the main app @@ -31,9 +33,9 @@ pub struct DataCollection { pub current_instant: Instant, pub timed_data_vec: Vec<(Instant, TimedData)>, pub network_harvest: network::NetworkHarvest, + pub memory_harvest: mem::MemHarvest, + pub swap_harvest: mem::MemHarvest, // pub process_data: ProcessData, - // pub disk_data: DiskData, - // pub temp_data: TempData, } impl Default for DataCollection { @@ -42,6 +44,8 @@ impl Default for DataCollection { current_instant: Instant::now(), timed_data_vec: Vec::default(), network_harvest: network::NetworkHarvest::default(), + memory_harvest: mem::MemHarvest::default(), + swap_harvest: mem::MemHarvest::default(), // process_data: ProcessData::default(), } } @@ -54,6 +58,52 @@ impl DataCollection { let harvested_time = harvested_data.last_collection_time; let mut new_entry = TimedData::default(); + // Network + self.eat_network(&harvested_data, &harvested_time, &mut new_entry); + + // Memory and Swap + self.eat_memory_and_swap(&harvested_data, &harvested_time, &mut new_entry); + + // And we're done eating. 
+ self.current_instant = harvested_time; + self.timed_data_vec.push((harvested_time, new_entry)); + } + + fn eat_memory_and_swap( + &mut self, harvested_data: &Data, harvested_time: &Instant, new_entry: &mut TimedData, + ) { + // Memory + let mem_percent = harvested_data.memory.mem_used_in_mb as f64 + / harvested_data.memory.mem_total_in_mb as f64 + * 100.0; + let mem_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() { + generate_joining_points(&time, last_pt.mem_data.0, &harvested_time, mem_percent) + } else { + Vec::new() + }; + let mem_pt = (mem_percent, mem_joining_pts); + new_entry.mem_data = mem_pt; + + // Swap + let swap_percent = harvested_data.swap.mem_used_in_mb as f64 + / harvested_data.swap.mem_total_in_mb as f64 + * 100.0; + let swap_joining_pt = if let Some((time, last_pt)) = self.timed_data_vec.last() { + generate_joining_points(&time, last_pt.swap_data.0, &harvested_time, swap_percent) + } else { + Vec::new() + }; + let swap_pt = (swap_percent, swap_joining_pt); + new_entry.swap_data = swap_pt; + + // In addition copy over latest data for easy reference + self.memory_harvest = harvested_data.memory.clone(); + self.swap_harvest = harvested_data.swap.clone(); + } + + fn eat_network( + &mut self, harvested_data: &Data, harvested_time: &Instant, new_entry: &mut TimedData, + ) { // RX let rx_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() { generate_joining_points( @@ -82,12 +132,8 @@ impl DataCollection { let tx_pt = (harvested_data.network.tx as f64, tx_joining_pts); new_entry.tx_data = tx_pt; - // Copy over data + // In addition copy over latest data for easy reference self.network_harvest = harvested_data.network.clone(); - - // And we're done eating. 
- self.current_instant = harvested_time; - self.timed_data_vec.push((harvested_time, new_entry)); } } diff --git a/src/canvas.rs b/src/canvas.rs index 578241a9..440ea5e4 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -585,12 +585,13 @@ fn draw_memory_graph(f: &mut Frame, app_state: &app::App let x_axis: Axis = Axis::default() .style(Style::default().fg(GRAPH_COLOUR)) - .bounds([0.0, constants::TIME_STARTS_FROM as f64 * 10.0]); + .bounds([0.0, constants::TIME_STARTS_FROM as f64]); let y_axis = Axis::default() .style(Style::default().fg(GRAPH_COLOUR)) .bounds([-0.5, 100.5]) // Offset as the zero value isn't drawn otherwise... .labels(&["0%", "100%"]); + // TODO: [OPT] Move this let mem_name = "RAM:".to_string() + &format!( "{:3}%", diff --git a/src/data_conversion.rs b/src/data_conversion.rs index 824f1e02..c51b4450 100644 --- a/src/data_conversion.rs +++ b/src/data_conversion.rs @@ -159,7 +159,7 @@ pub fn simple_update_process_row( process.name.to_ascii_lowercase().contains(matching_string) } }) - .map(|process| return_mapped_process(process, app_data)) + .map(|process| return_mapped_process(process)) .collect::>(); let mut grouped_process_vector: Vec = Vec::new(); @@ -177,7 +177,7 @@ pub fn simple_update_process_row( process.name.to_ascii_lowercase().contains(matching_string) } }) - .map(|process| return_mapped_process(process, app_data)) + .map(|process| return_mapped_process(process)) .collect::>(); } @@ -202,7 +202,7 @@ pub fn regex_update_process_row( true } }) - .map(|process| return_mapped_process(process, app_data)) + .map(|process| return_mapped_process(process)) .collect::>(); let mut grouped_process_vector: Vec = Vec::new(); @@ -220,34 +220,19 @@ pub fn regex_update_process_row( true } }) - .map(|process| return_mapped_process(process, app_data)) + .map(|process| return_mapped_process(process)) .collect::>(); } (process_vector, grouped_process_vector) } -fn return_mapped_process( - process: &data_harvester::processes::ProcessData, app_data: 
&data_harvester::Data, -) -> ConvertedProcessData { +fn return_mapped_process(process: &data_harvester::processes::ProcessData) -> ConvertedProcessData { ConvertedProcessData { pid: process.pid, name: process.name.to_string(), cpu_usage: format!("{:.1}%", process.cpu_usage_percent), - mem_usage: format!( - "{:.1}%", - if let Some(mem_usage) = process.mem_usage_percent { - mem_usage - } else if let Some(mem_usage_kb) = process.mem_usage_kb { - if let Some(mem_data) = app_data.memory.last() { - (mem_usage_kb / 1000) as f64 / mem_data.mem_total_in_mb as f64 * 100_f64 // TODO: [OPT] Get rid of this - } else { - 0_f64 - } - } else { - 0_f64 - } - ), + mem_usage: format!("{:.1}%", process.mem_usage_percent), group: vec![], } } @@ -331,71 +316,65 @@ pub fn update_cpu_data_points( cpu_data_vector } -pub fn update_mem_data_points(app_data: &data_harvester::Data) -> Vec<(f64, f64)> { - convert_mem_data(&app_data.memory) -} - -pub fn update_swap_data_points(app_data: &data_harvester::Data) -> Vec<(f64, f64)> { - convert_mem_data(&app_data.swap) -} - -pub fn update_mem_data_values(app_data: &data_harvester::Data) -> Vec<(u64, u64)> { - let mut result: Vec<(u64, u64)> = Vec::new(); - result.push(get_most_recent_mem_values(&app_data.memory)); - result.push(get_most_recent_mem_values(&app_data.swap)); - - result -} - -fn get_most_recent_mem_values(mem_data: &[data_harvester::mem::MemData]) -> (u64, u64) { - let mut result: (u64, u64) = (0, 0); - - if !mem_data.is_empty() { - if let Some(most_recent) = mem_data.last() { - result.0 = most_recent.mem_used_in_mb; - result.1 = most_recent.mem_total_in_mb; - } - } - - result -} - -fn convert_mem_data(mem_data: &[data_harvester::mem::MemData]) -> Vec<(f64, f64)> { +pub fn update_mem_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> { let mut result: Vec<(f64, f64)> = Vec::new(); + let current_time = current_data.current_instant; - for data in mem_data { - let current_time = std::time::Instant::now(); - let 
new_entry = ( - ((TIME_STARTS_FROM as f64 - - current_time.duration_since(data.instant).as_millis() as f64) - * 10_f64) - .floor(), - if data.mem_total_in_mb == 0 { - -1000.0 - } else { - (data.mem_used_in_mb as f64 * 100_f64) / data.mem_total_in_mb as f64 - }, - ); + for (time, data) in ¤t_data.timed_data_vec { + let time_from_start: f64 = (TIME_STARTS_FROM as f64 + - current_time.duration_since(*time).as_millis() as f64) + .floor(); - // Now, inject our joining points... - if !result.is_empty() { - let previous_element_data = *(result.last().unwrap()); - for idx in 0..50 { - result.push(( - previous_element_data.0 - + ((new_entry.0 - previous_element_data.0) / 50.0 * f64::from(idx)), - previous_element_data.1 - + ((new_entry.1 - previous_element_data.1) / 50.0 * f64::from(idx)), - )); - } + //Insert joiner points + for &(joiner_offset, joiner_val) in &data.mem_data.1 { + let offset_time = time_from_start - joiner_offset as f64; + result.push((offset_time, joiner_val)); } - result.push(new_entry); + result.push((time_from_start, data.mem_data.0)); } result } +pub fn update_swap_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> { + let mut result: Vec<(f64, f64)> = Vec::new(); + let current_time = current_data.current_instant; + + for (time, data) in ¤t_data.timed_data_vec { + let time_from_start: f64 = (TIME_STARTS_FROM as f64 + - current_time.duration_since(*time).as_millis() as f64) + .floor(); + + //Insert joiner points + for &(joiner_offset, joiner_val) in &data.swap_data.1 { + let offset_time = time_from_start - joiner_offset as f64; + result.push((offset_time, joiner_val)); + } + + result.push((time_from_start, data.swap_data.0)); + } + + result +} + +pub fn update_mem_labels(current_data: &data_janitor::DataCollection) -> Vec<(u64, u64)> { + let mut result: Vec<(u64, u64)> = Vec::new(); + + // This wants (u64, u64) values - left is usage in MB, right is total in MB + result.push(( + current_data.memory_harvest.mem_used_in_mb, + 
current_data.memory_harvest.mem_total_in_mb, + )); + + result.push(( + current_data.swap_harvest.mem_used_in_mb, + current_data.swap_harvest.mem_total_in_mb, + )); + + result +} + pub fn convert_network_data_points( current_data: &data_janitor::DataCollection, ) -> ConvertedNetworkData { @@ -433,8 +412,22 @@ pub fn convert_network_data_points( )); } - rx.push((time_from_start, data.rx_data.0)); - tx.push((time_from_start, data.tx_data.0)); + rx.push(( + time_from_start, + if data.rx_data.0 > 0.0 { + (data.rx_data.0).log(2.0) + } else { + 0.0 + }, + )); + tx.push(( + time_from_start, + if data.rx_data.0 > 0.0 { + (data.rx_data.0).log(2.0) + } else { + 0.0 + }, + )); } let total_rx_converted_result: (f64, String); diff --git a/src/main.rs b/src/main.rs index 7d2e005e..625a6288 100644 --- a/src/main.rs +++ b/src/main.rs @@ -179,7 +179,6 @@ fn main() -> error::Result<()> { let (rtx, rrx) = mpsc::channel(); { let tx = tx; - let mut first_run = true; let temp_type = app.temperature_type.clone(); thread::spawn(move || { let tx = tx.clone(); @@ -191,21 +190,11 @@ fn main() -> error::Result<()> { if let Ok(message) = rrx.try_recv() { match message { ResetEvent::Reset => { - //debug!("Received reset message"); - first_run = true; - data_state.data = app::data_harvester::Data::default(); + data_state.data.first_run_cleanup(); } } } futures::executor::block_on(data_state.update_data()); - - if first_run { - // Fix for if you set a really long time for update periods (and just gives a faster first value) - data_state.data.first_run_cleanup(); // TODO: [OPT] we can remove this later. 
- thread::sleep(Duration::from_millis(250)); - futures::executor::block_on(data_state.update_data()); - first_run = false; - } tx.send(Event::Update(Box::from(data_state.data.clone()))) .unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it thread::sleep(Duration::from_millis(update_rate_in_milliseconds as u64)); @@ -295,9 +284,9 @@ fn main() -> error::Result<()> { app.canvas_data.disk_data = update_disk_row(&app.data); app.canvas_data.temp_sensor_data = update_temp_row(&app.data, &app.temperature_type); - app.canvas_data.mem_data = update_mem_data_points(&app.data); - app.canvas_data.memory_labels = update_mem_data_values(&app.data); - app.canvas_data.swap_data = update_swap_data_points(&app.data); + app.canvas_data.mem_data = update_mem_data_points(&app.data_collection); + app.canvas_data.swap_data = update_swap_data_points(&app.data_collection); + app.canvas_data.memory_labels = update_mem_labels(&app.data_collection); app.canvas_data.cpu_data = update_cpu_data_points(app.show_average_cpu, &app.data); } @@ -325,7 +314,7 @@ fn main() -> error::Result<()> { Ok(()) } -type TempProcess = (f64, Option, Option, Vec); +type TempProcess = (f64, f64, Vec); fn handle_process_sorting(app: &mut app::App) { // Handle combining multi-pid processes to form one entry in table. 
@@ -338,23 +327,12 @@ fn handle_process_sorting(app: &mut app::App) { // Fields for tuple: CPU%, MEM%, MEM_KB, PID_VEC let mut process_map: BTreeMap = BTreeMap::new(); for process in &app.data.list_of_processes { - let entry_val = - process_map - .entry(process.name.clone()) - .or_insert((0.0, None, None, vec![])); - if let Some(mem_usage) = process.mem_usage_percent { - entry_val.0 += process.cpu_usage_percent; - if let Some(m) = &mut entry_val.1 { - *m += mem_usage; - } - entry_val.3.push(process.pid); - } else if let Some(mem_usage_kb) = process.mem_usage_kb { - entry_val.0 += process.cpu_usage_percent; - if let Some(m) = &mut entry_val.2 { - *m += mem_usage_kb; - } - entry_val.3.push(process.pid); - } + let entry_val = process_map + .entry(process.name.clone()) + .or_insert((0.0, 0.0, vec![])); + entry_val.0 += process.cpu_usage_percent; + entry_val.1 += process.mem_usage_percent; + entry_val.2.push(process.pid); } // Now... turn this back into the exact same vector... but now with merged processes! @@ -366,9 +344,8 @@ fn handle_process_sorting(app: &mut app::App) { pid: 0, // Irrelevant cpu_usage_percent: data.0, mem_usage_percent: data.1, - mem_usage_kb: data.2, name: name.clone(), - pid_vec: Some(data.3.clone()), + pid_vec: Some(data.2.clone()), } }) .collect::>(), From c5cd431e2e82e58526dc990de584d661c3a3ff89 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 26 Jan 2020 16:44:24 -0500 Subject: [PATCH 12/26] Updated memory labels to be more straightforward in generation. 
--- src/app/data_janitor.rs | 22 ++++++++-------- src/canvas.rs | 56 +++++++++++------------------------------ src/data_conversion.rs | 45 ++++++++++++++++++++++++--------- src/main.rs | 4 ++- 4 files changed, 62 insertions(+), 65 deletions(-) diff --git a/src/app/data_janitor.rs b/src/app/data_janitor.rs index e87ec947..d361ffc0 100644 --- a/src/app/data_janitor.rs +++ b/src/app/data_janitor.rs @@ -85,16 +85,18 @@ impl DataCollection { new_entry.mem_data = mem_pt; // Swap - let swap_percent = harvested_data.swap.mem_used_in_mb as f64 - / harvested_data.swap.mem_total_in_mb as f64 - * 100.0; - let swap_joining_pt = if let Some((time, last_pt)) = self.timed_data_vec.last() { - generate_joining_points(&time, last_pt.swap_data.0, &harvested_time, swap_percent) - } else { - Vec::new() - }; - let swap_pt = (swap_percent, swap_joining_pt); - new_entry.swap_data = swap_pt; + if harvested_data.swap.mem_total_in_mb > 0 { + let swap_percent = harvested_data.swap.mem_used_in_mb as f64 + / harvested_data.swap.mem_total_in_mb as f64 + * 100.0; + let swap_joining_pt = if let Some((time, last_pt)) = self.timed_data_vec.last() { + generate_joining_points(&time, last_pt.swap_data.0, &harvested_time, swap_percent) + } else { + Vec::new() + }; + let swap_pt = (swap_percent, swap_joining_pt); + new_entry.swap_data = swap_pt; + } // In addition copy over latest data for easy reference self.memory_harvest = harvested_data.memory.clone(); diff --git a/src/canvas.rs b/src/canvas.rs index 440ea5e4..3111ef24 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -96,7 +96,8 @@ pub struct CanvasData { pub temp_sensor_data: Vec>, pub process_data: Vec, pub grouped_process_data: Vec, - pub memory_labels: Vec<(u64, u64)>, + pub mem_label: String, + pub swap_label: String, pub mem_data: Vec<(f64, f64)>, pub swap_data: Vec<(f64, f64)>, pub cpu_data: Vec, @@ -581,7 +582,6 @@ fn draw_memory_table( fn draw_memory_graph(f: &mut Frame, app_state: &app::App, draw_loc: Rect) { let mem_data: &[(f64, f64)] = 
&(app_state.canvas_data.mem_data); let swap_data: &[(f64, f64)] = &(app_state.canvas_data.swap_data); - let memory_labels: &[(u64, u64)] = &(app_state.canvas_data.memory_labels); let x_axis: Axis = Axis::default() .style(Style::default().fg(GRAPH_COLOUR)) @@ -591,20 +591,8 @@ fn draw_memory_graph(f: &mut Frame, app_state: &app::App .bounds([-0.5, 100.5]) // Offset as the zero value isn't drawn otherwise... .labels(&["0%", "100%"]); - // TODO: [OPT] Move this - let mem_name = "RAM:".to_string() - + &format!( - "{:3}%", - (mem_data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64) - ) + &format!( - " {:.1}GB/{:.1}GB", - memory_labels.first().unwrap_or(&(0, 0)).0 as f64 / 1024.0, - memory_labels.first().unwrap_or(&(0, 0)).1 as f64 / 1024.0 - ); - let swap_name: String; - let mut mem_canvas_vec: Vec = vec![Dataset::default() - .name(&mem_name) + .name(&app_state.canvas_data.mem_label) .marker(if app_state.use_dot { Marker::Dot } else { @@ -614,35 +602,19 @@ fn draw_memory_graph(f: &mut Frame, app_state: &app::App .data(&mem_data)]; if !(&swap_data).is_empty() { - if let Some(last_canvas_result) = (&swap_data).last() { - if last_canvas_result.1 >= 0.0 { - swap_name = "SWP:".to_string() - + &format!( - "{:3}%", - (swap_data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64) - ) + &format!( - " {:.1}GB/{:.1}GB", - memory_labels[1].0 as f64 / 1024.0, - memory_labels[1].1 as f64 / 1024.0 - ); - mem_canvas_vec.push( - Dataset::default() - .name(&swap_name) - .marker(if app_state.use_dot { - Marker::Dot - } else { - Marker::Braille - }) - .style(Style::default().fg(COLOUR_LIST[1])) - .data(&swap_data), - ); - } - } + mem_canvas_vec.push( + Dataset::default() + .name(&app_state.canvas_data.swap_label) + .marker(if app_state.use_dot { + Marker::Dot + } else { + Marker::Braille + }) + .style(Style::default().fg(COLOUR_LIST[1])) + .data(&swap_data), + ); } - // Memory usage table - // draw_memory_table(f, &app_state, mem_labels, swap_labels, label_loc); - Chart::default() 
.block( Block::default() diff --git a/src/data_conversion.rs b/src/data_conversion.rs index c51b4450..3618c8d2 100644 --- a/src/data_conversion.rs +++ b/src/data_conversion.rs @@ -358,21 +358,42 @@ pub fn update_swap_data_points(current_data: &data_janitor::DataCollection) -> V result } -pub fn update_mem_labels(current_data: &data_janitor::DataCollection) -> Vec<(u64, u64)> { - let mut result: Vec<(u64, u64)> = Vec::new(); +pub fn update_mem_labels(current_data: &data_janitor::DataCollection) -> (String, String) { + let mem_label = if current_data.memory_harvest.mem_total_in_mb == 0 { + "".to_string() + } else { + "RAM:".to_string() + + &format!( + "{:3.0}%", + (current_data.memory_harvest.mem_used_in_mb as f64 * 100.0 + / current_data.memory_harvest.mem_total_in_mb as f64) + .round() + ) + &format!( + " {:.1}GB/{:.1}GB", + current_data.memory_harvest.mem_used_in_mb as f64 / 1024.0, + current_data.memory_harvest.mem_total_in_mb as f64 / 1024.0 + ) + }; - // This wants (u64, u64) values - left is usage in MB, right is total in MB - result.push(( - current_data.memory_harvest.mem_used_in_mb, - current_data.memory_harvest.mem_total_in_mb, - )); + let swap_label = if current_data.swap_harvest.mem_total_in_mb == 0 { + "".to_string() + } else { + "SWP:".to_string() + + &format!( + "{:3.0}%", + (current_data.swap_harvest.mem_used_in_mb as f64 * 100.0 + / current_data.swap_harvest.mem_total_in_mb as f64) + .round() + ) + &format!( + " {:.1}GB/{:.1}GB", + current_data.swap_harvest.mem_used_in_mb as f64 / 1024.0, + current_data.swap_harvest.mem_total_in_mb as f64 / 1024.0 + ) + }; - result.push(( - current_data.swap_harvest.mem_used_in_mb, - current_data.swap_harvest.mem_total_in_mb, - )); + debug!("{:?}", mem_label); - result + (mem_label, swap_label) } pub fn convert_network_data_points( diff --git a/src/main.rs b/src/main.rs index 625a6288..4ac4e7f8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -286,7 +286,9 @@ fn main() -> error::Result<()> { update_temp_row(&app.data, 
&app.temperature_type); app.canvas_data.mem_data = update_mem_data_points(&app.data_collection); app.canvas_data.swap_data = update_swap_data_points(&app.data_collection); - app.canvas_data.memory_labels = update_mem_labels(&app.data_collection); + let memory_and_swap_labels = update_mem_labels(&app.data_collection); + app.canvas_data.mem_label = memory_and_swap_labels.0; + app.canvas_data.swap_label = memory_and_swap_labels.1; app.canvas_data.cpu_data = update_cpu_data_points(app.show_average_cpu, &app.data); } From 57aa15978ce2678c196f8ee93d9d45b9ff6f568d Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 26 Jan 2020 20:14:14 -0500 Subject: [PATCH 13/26] Moved CPU over --- src/app.rs | 27 +++------- src/app/data_harvester.rs | 25 +++------ src/app/data_harvester/cpu.rs | 21 +++----- src/app/data_janitor.rs | 42 ++++++++++++--- src/canvas.rs | 2 +- src/data_conversion.rs | 97 ++++++++++++----------------------- src/main.rs | 21 +++++--- 7 files changed, 104 insertions(+), 131 deletions(-) diff --git a/src/app.rs b/src/app.rs index c6c13691..3e672f6c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -639,15 +639,8 @@ impl App { self.currently_selected_disk_position = self.data.list_of_disks.len() as i64 - 1 } ApplicationPosition::Cpu => { - if let Some(cpu_package) = self.data.list_of_cpu_packages.last() { - if self.show_average_cpu { - self.currently_selected_cpu_table_position = - cpu_package.cpu_vec.len() as i64; - } else { - self.currently_selected_cpu_table_position = - cpu_package.cpu_vec.len() as i64 - 1; - } - } + self.currently_selected_cpu_table_position = + self.canvas_data.cpu_data.len() as i64 - 1; } _ => {} } @@ -685,17 +678,11 @@ impl App { } fn change_cpu_table_position(&mut self, num_to_change_by: i64) { - if let Some(cpu_package) = self.data.list_of_cpu_packages.last() { - if self.currently_selected_cpu_table_position + num_to_change_by >= 0 - && self.currently_selected_cpu_table_position + num_to_change_by - < if self.show_average_cpu { - 
cpu_package.cpu_vec.len() - } else { - cpu_package.cpu_vec.len() - 1 - } as i64 - { - self.currently_selected_cpu_table_position += num_to_change_by; - } + if self.currently_selected_cpu_table_position + num_to_change_by >= 0 + && self.currently_selected_cpu_table_position + num_to_change_by + < self.canvas_data.cpu_data.len() as i64 + { + self.currently_selected_cpu_table_position += num_to_change_by; } } diff --git a/src/app/data_harvester.rs b/src/app/data_harvester.rs index 100c3a9c..4127f09f 100644 --- a/src/app/data_harvester.rs +++ b/src/app/data_harvester.rs @@ -25,7 +25,7 @@ fn push_if_valid(result: &Result, vector_to_push: &mut #[derive(Clone, Debug)] pub struct Data { - pub list_of_cpu_packages: Vec, + pub cpu: cpu::CPUHarvest, pub list_of_io: Vec, pub memory: mem::MemHarvest, pub swap: mem::MemHarvest, @@ -40,7 +40,7 @@ pub struct Data { impl Default for Data { fn default() -> Self { Data { - list_of_cpu_packages: Vec::default(), + cpu: cpu::CPUHarvest::default(), list_of_io: Vec::default(), memory: mem::MemHarvest::default(), swap: mem::MemHarvest::default(), @@ -56,7 +56,6 @@ impl Default for Data { impl Data { pub fn first_run_cleanup(&mut self) { - self.list_of_cpu_packages = Vec::new(); self.list_of_io = Vec::new(); self.list_of_temperature_sensor = Vec::new(); self.list_of_processes = Vec::new(); @@ -66,6 +65,7 @@ impl Data { self.network.first_run_cleanup(); self.memory = mem::MemHarvest::default(); self.swap = mem::MemHarvest::default(); + self.cpu = cpu::CPUHarvest::default(); } } @@ -112,6 +112,7 @@ impl DataState { self.sys.refresh_all(); self.mem_total_kb = self.sys.get_total_memory(); futures::executor::block_on(self.update_data()); + std::thread::sleep(std::time::Duration::from_millis(250)); self.data.first_run_cleanup(); } @@ -145,11 +146,10 @@ impl DataState { self.data.swap = swap; } + // CPU + self.data.cpu = cpu::get_cpu_data_list(&self.sys); + // What we want to do: For timed data, if there is an error, just do not add. 
For other data, just don't update! - push_if_valid( - &cpu::get_cpu_data_list(&self.sys, ¤t_instant), - &mut self.data.list_of_cpu_packages, - ); set_if_valid( &processes::get_sorted_processes_list( &self.sys, @@ -192,17 +192,6 @@ impl DataState { for stale in stale_list { self.prev_pid_stats.remove(&stale); } - - self.data.list_of_cpu_packages = self - .data - .list_of_cpu_packages - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); - self.data.list_of_io = self .data .list_of_io diff --git a/src/app/data_harvester/cpu.rs b/src/app/data_harvester/cpu.rs index 4987a6a3..c3276827 100644 --- a/src/app/data_harvester/cpu.rs +++ b/src/app/data_harvester/cpu.rs @@ -1,33 +1,26 @@ -use std::time::Instant; use sysinfo::{ProcessorExt, System, SystemExt}; -#[derive(Debug, Clone)] +#[derive(Default, Debug, Clone)] pub struct CPUData { - pub cpu_name: Box, + pub cpu_name: String, pub cpu_usage: f64, } -#[derive(Debug, Clone)] -pub struct CPUPackage { +#[derive(Default, Debug, Clone)] +pub struct CPUHarvest { pub cpu_vec: Vec, - pub instant: Instant, } -pub fn get_cpu_data_list( - sys: &System, curr_time: &Instant, -) -> crate::utils::error::Result { +pub fn get_cpu_data_list(sys: &System) -> CPUHarvest { let cpu_data = sys.get_processor_list(); let mut cpu_vec = Vec::new(); for cpu in cpu_data { cpu_vec.push(CPUData { - cpu_name: Box::from(cpu.get_name()), + cpu_name: cpu.get_name().to_string(), cpu_usage: f64::from(cpu.get_cpu_usage()) * 100_f64, }); } - Ok(CPUPackage { - cpu_vec, - instant: *curr_time, - }) + CPUHarvest { cpu_vec } } diff --git a/src/app/data_janitor.rs b/src/app/data_janitor.rs index d361ffc0..e6e5f336 100644 --- a/src/app/data_janitor.rs +++ b/src/app/data_janitor.rs @@ -1,4 +1,4 @@ -use crate::data_harvester::{mem, network, Data}; +use crate::data_harvester::{cpu, mem, network, Data}; /// In charge of cleaning and managing data. 
I couldn't think of a better /// name for the file. use std::time::Instant; @@ -12,7 +12,7 @@ pub type JoinedDataPoints = (Value, Vec<(TimeOffset, Value)>); pub struct TimedData { pub rx_data: JoinedDataPoints, pub tx_data: JoinedDataPoints, - pub cpu_data: JoinedDataPoints, + pub cpu_data: Vec, pub mem_data: JoinedDataPoints, pub swap_data: JoinedDataPoints, pub temp_data: JoinedDataPoints, @@ -35,7 +35,7 @@ pub struct DataCollection { pub network_harvest: network::NetworkHarvest, pub memory_harvest: mem::MemHarvest, pub swap_harvest: mem::MemHarvest, - // pub process_data: ProcessData, + pub cpu_harvest: cpu::CPUHarvest, } impl Default for DataCollection { @@ -46,7 +46,7 @@ impl Default for DataCollection { network_harvest: network::NetworkHarvest::default(), memory_harvest: mem::MemHarvest::default(), swap_harvest: mem::MemHarvest::default(), - // process_data: ProcessData::default(), + cpu_harvest: cpu::CPUHarvest::default(), } } } @@ -64,6 +64,9 @@ impl DataCollection { // Memory and Swap self.eat_memory_and_swap(&harvested_data, &harvested_time, &mut new_entry); + // CPU + self.eat_cpu(&harvested_data, &harvested_time, &mut new_entry); + // And we're done eating. self.current_instant = harvested_time; self.timed_data_vec.push((harvested_time, new_entry)); @@ -137,6 +140,31 @@ impl DataCollection { // In addition copy over latest data for easy reference self.network_harvest = harvested_data.network.clone(); } + + fn eat_cpu( + &mut self, harvested_data: &Data, harvested_time: &Instant, new_entry: &mut TimedData, + ) { + // Note this only pre-calculates the data points - the names will be + // within the local copy of cpu_harvest. Since it's all sequential + // it probably doesn't matter anyways. 
+ for (itx, cpu) in harvested_data.cpu.cpu_vec.iter().enumerate() { + let cpu_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() { + generate_joining_points( + &time, + last_pt.cpu_data[itx].0, + &harvested_time, + cpu.cpu_usage, + ) + } else { + Vec::new() + }; + + let cpu_pt = (cpu.cpu_usage, cpu_joining_pts); + new_entry.cpu_data.push(cpu_pt); + } + + self.cpu_harvest = harvested_data.cpu.clone(); + } } pub fn generate_joining_points( @@ -151,13 +179,13 @@ pub fn generate_joining_points( // Let's generate... about this many points! let num_points = std::cmp::min( std::cmp::max( - (value_difference.abs() / (time_difference + 0.0001) * 1000.0) as u64, + (value_difference.abs() / (time_difference + 0.0001) * 500.0) as u64, 100, ), - 1000, + 500, ); - for itx in 0..num_points { + for itx in (0..num_points).step_by(1) { points.push(( time_difference - (itx as f64 / num_points as f64 * time_difference), start_y + (itx as f64 / num_points as f64 * value_difference), diff --git a/src/canvas.rs b/src/canvas.rs index 3111ef24..1cb7e4b6 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -391,7 +391,7 @@ fn draw_cpu_graph(f: &mut Frame, app_state: &app::App, d // CPU usage graph let x_axis: Axis = Axis::default() .style(Style::default().fg(GRAPH_COLOUR)) - .bounds([0.0, constants::TIME_STARTS_FROM as f64 * 10.0]); + .bounds([0.0, constants::TIME_STARTS_FROM as f64]); let y_axis = Axis::default() .style(Style::default().fg(GRAPH_COLOUR)) .bounds([-0.5, 100.5]) diff --git a/src/data_conversion.rs b/src/data_conversion.rs index 3618c8d2..1b5dd0ee 100644 --- a/src/data_conversion.rs +++ b/src/data_conversion.rs @@ -9,7 +9,6 @@ use crate::{ }; use constants::*; use regex::Regex; -use std::time::Instant; #[derive(Default, Debug)] pub struct ConvertedNetworkData { @@ -238,78 +237,48 @@ fn return_mapped_process(process: &data_harvester::processes::ProcessData) -> Co } pub fn update_cpu_data_points( - show_avg_cpu: bool, app_data: &data_harvester::Data, + 
show_avg_cpu: bool, current_data: &data_janitor::DataCollection, ) -> Vec { let mut cpu_data_vector: Vec = Vec::new(); - let mut cpu_collection: Vec> = Vec::new(); + let current_time = current_data.current_instant; + let cpu_listing_offset = if show_avg_cpu { 0 } else { 1 }; - if !app_data.list_of_cpu_packages.is_empty() { - // I'm sorry for the following if statement but I couldn't be bothered here... - for cpu_num in (if show_avg_cpu { 0 } else { 1 }) - ..app_data.list_of_cpu_packages.last().unwrap().cpu_vec.len() - { - let mut this_cpu_data: Vec = Vec::new(); + for (time, data) in ¤t_data.timed_data_vec { + let time_from_start: f64 = (TIME_STARTS_FROM as f64 + - current_time.duration_since(*time).as_millis() as f64) + .floor(); - for data in &app_data.list_of_cpu_packages { - let current_time = Instant::now(); - let current_cpu_usage = data.cpu_vec[cpu_num].cpu_usage; - - let new_entry = CpuPoint { - time: ((TIME_STARTS_FROM as f64 - - current_time.duration_since(data.instant).as_millis() as f64) - * 10_f64) - .floor(), - usage: current_cpu_usage, - }; - - // Now, inject our joining points... 
- if let Some(previous_element_data) = this_cpu_data.last().cloned() { - for idx in 0..50 { - this_cpu_data.push(CpuPoint { - time: previous_element_data.time - + ((new_entry.time - previous_element_data.time) / 50.0 - * f64::from(idx)), - usage: previous_element_data.usage - + ((new_entry.usage - previous_element_data.usage) / 50.0 - * f64::from(idx)), - }); - } - } - - this_cpu_data.push(new_entry); + for (itx, cpu) in data.cpu_data.iter().enumerate() { + if !show_avg_cpu && itx == 0 { + continue; } - cpu_collection.push(this_cpu_data); - } + // Check if the vector exists yet + let itx_offset = itx - cpu_listing_offset; + if cpu_data_vector.len() <= itx_offset { + cpu_data_vector.push(ConvertedCpuData::default()); + cpu_data_vector[itx_offset].cpu_name = if show_avg_cpu && itx_offset == 0 { + "AVG".to_string() + } else { + current_data.cpu_harvest.cpu_vec[itx] + .cpu_name + .to_uppercase() + }; + } - // Finally, add it all onto the end - for (i, data) in cpu_collection.iter().enumerate() { - if !app_data.list_of_cpu_packages.is_empty() { - // Commented out: this version includes the percentage in the label... 
- // cpu_data_vector.push(( - // // + 1 to skip total CPU if show_avg_cpu is false - // format!( - // "{:4}: ", - // &*(app_data.list_of_cpu_packages.last().unwrap().cpu_vec[i + if show_avg_cpu { 0 } else { 1 }].cpu_name) - // ) - // .to_uppercase() + &format!("{:3}%", (data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64)), - // data.clone(), - // )) - cpu_data_vector.push(ConvertedCpuData { - cpu_name: format!( - "{} ", - if show_avg_cpu && i == 0 { - "AVG" - } else { - &*(app_data.list_of_cpu_packages.last().unwrap().cpu_vec - [i + if show_avg_cpu { 0 } else { 1 }] - .cpu_name) - } - ) - .to_uppercase(), - cpu_data: data.clone(), + //Insert joiner points + for &(joiner_offset, joiner_val) in &cpu.1 { + let offset_time = time_from_start - joiner_offset as f64; + cpu_data_vector[itx_offset].cpu_data.push(CpuPoint { + time: offset_time, + usage: joiner_val, }); } + + cpu_data_vector[itx_offset].cpu_data.push(CpuPoint { + time: time_from_start, + usage: cpu.0, + }); } } diff --git a/src/main.rs b/src/main.rs index 4ac4e7f8..2e40c3cc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -264,16 +264,13 @@ fn main() -> error::Result<()> { _ => {} }, Event::Update(data) => { - // NOTE TO SELF - data is refreshed into app state HERE! That means, if it is - // frozen, then, app.data is never refreshed, until unfrozen! 
if !app.is_frozen { app.data_collection.eat_data(&data); + app.data = *data; // TODO: [OPT] remove this - app.data = *data; + // Convert all data into tui-compliant components - handle_process_sorting(&mut app); - - // Convert all data into tui components + // Network let network_data = convert_network_data_points(&app.data_collection); app.canvas_data.network_data_rx = network_data.rx; app.canvas_data.network_data_tx = network_data.tx; @@ -281,16 +278,26 @@ fn main() -> error::Result<()> { app.canvas_data.tx_display = network_data.tx_display; app.canvas_data.total_rx_display = network_data.total_rx_display; app.canvas_data.total_tx_display = network_data.total_tx_display; + + // Disk app.canvas_data.disk_data = update_disk_row(&app.data); + + // Temperatures app.canvas_data.temp_sensor_data = update_temp_row(&app.data, &app.temperature_type); + // Memory app.canvas_data.mem_data = update_mem_data_points(&app.data_collection); app.canvas_data.swap_data = update_swap_data_points(&app.data_collection); let memory_and_swap_labels = update_mem_labels(&app.data_collection); app.canvas_data.mem_label = memory_and_swap_labels.0; app.canvas_data.swap_label = memory_and_swap_labels.1; + + // CPU app.canvas_data.cpu_data = - update_cpu_data_points(app.show_average_cpu, &app.data); + update_cpu_data_points(app.show_average_cpu, &app.data_collection); + + // Processes + handle_process_sorting(&mut app); } } } From abe8a4bb861988daf67e37803b05d94507eddbda Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 28 Jan 2020 21:24:52 -0500 Subject: [PATCH 14/26] Moved temp and drives over... 
--- src/app.rs | 36 ++++---- src/app/{data_janitor.rs => data_farmer.rs} | 94 ++++++++++++++++++-- src/app/data_harvester.rs | 65 ++++++-------- src/app/data_harvester/cpu.rs | 7 +- src/app/data_harvester/disks.rs | 53 +++++------ src/app/data_harvester/processes.rs | 16 ++-- src/app/data_harvester/temperature.rs | 18 ++-- src/canvas.rs | 10 +-- src/constants.rs | 2 +- src/data_conversion.rs | 98 ++++++++------------- src/main.rs | 15 ++-- 11 files changed, 217 insertions(+), 197 deletions(-) rename src/app/{data_janitor.rs => data_farmer.rs} (64%) diff --git a/src/app.rs b/src/app.rs index 3e672f6c..5b398c93 100644 --- a/src/app.rs +++ b/src/app.rs @@ -2,10 +2,10 @@ pub mod data_harvester; use data_harvester::{processes, temperature}; use std::time::Instant; -pub mod data_janitor; -use data_janitor::*; +pub mod data_farmer; +use data_farmer::*; -use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::error::Result}; +use crate::{canvas, constants, data_conversion::ConvertedProcessHarvest, utils::error::Result}; mod process_killer; @@ -76,7 +76,7 @@ pub struct App { pub show_help: bool, pub show_dd: bool, pub dd_err: Option, - to_delete_process_list: Option>, + to_delete_process_list: Option>, pub is_frozen: bool, pub left_legend: bool, pub use_current_cpu_total: bool, @@ -196,19 +196,13 @@ impl App { self.enable_grouping } - pub fn toggle_searching(&mut self) { + pub fn enable_searching(&mut self) { if !self.is_in_dialog() { match self.current_application_position { ApplicationPosition::Process | ApplicationPosition::ProcessSearch => { - if self.enable_searching { - // Toggle off - self.enable_searching = false; - self.current_application_position = ApplicationPosition::Process; - } else { - // Toggle on - self.enable_searching = true; - self.current_application_position = ApplicationPosition::ProcessSearch; - } + // Toggle on + self.enable_searching = true; + self.current_application_position = ApplicationPosition::ProcessSearch; } _ => {} } 
@@ -397,7 +391,7 @@ impl App { } else { match caught_char { '/' => { - self.toggle_searching(); + self.enable_searching(); } 'd' => { if let ApplicationPosition::Process = self.current_application_position { @@ -405,7 +399,7 @@ impl App { self.awaiting_second_char = false; self.second_char = ' '; let current_process = if self.is_grouped() { - let mut res: Vec = Vec::new(); + let mut res: Vec = Vec::new(); for pid in &self.canvas_data.grouped_process_data [self.currently_selected_process_position as usize] .group @@ -530,7 +524,7 @@ impl App { Ok(()) } - pub fn get_current_highlighted_process_list(&self) -> Option> { + pub fn get_current_highlighted_process_list(&self) -> Option> { self.to_delete_process_list.clone() } @@ -633,10 +627,10 @@ impl App { } ApplicationPosition::Temp => { self.currently_selected_temperature_position = - self.data.list_of_temperature_sensor.len() as i64 - 1 + self.data.temperature_sensors.len() as i64 - 1 } ApplicationPosition::Disk => { - self.currently_selected_disk_position = self.data.list_of_disks.len() as i64 - 1 + self.currently_selected_disk_position = self.data.disks.len() as i64 - 1 } ApplicationPosition::Cpu => { self.currently_selected_cpu_table_position = @@ -698,7 +692,7 @@ impl App { fn change_temp_position(&mut self, num_to_change_by: i64) { if self.currently_selected_temperature_position + num_to_change_by >= 0 && self.currently_selected_temperature_position + num_to_change_by - < self.data.list_of_temperature_sensor.len() as i64 + < self.data.temperature_sensors.len() as i64 { self.currently_selected_temperature_position += num_to_change_by; } @@ -707,7 +701,7 @@ impl App { fn change_disk_position(&mut self, num_to_change_by: i64) { if self.currently_selected_disk_position + num_to_change_by >= 0 && self.currently_selected_disk_position + num_to_change_by - < self.data.list_of_disks.len() as i64 + < self.data.disks.len() as i64 { self.currently_selected_disk_position += num_to_change_by; } diff --git 
a/src/app/data_janitor.rs b/src/app/data_farmer.rs similarity index 64% rename from src/app/data_janitor.rs rename to src/app/data_farmer.rs index e6e5f336..0acb7343 100644 --- a/src/app/data_janitor.rs +++ b/src/app/data_farmer.rs @@ -1,6 +1,18 @@ -use crate::data_harvester::{cpu, mem, network, Data}; -/// In charge of cleaning and managing data. I couldn't think of a better -/// name for the file. +use crate::data_harvester::{cpu, disks, mem, network, processes, temperature, Data}; +/// In charge of cleaning, processing, and managing data. I couldn't think of +/// a better name for the file. Since I called data collection "harvesting", +/// then this is the farmer I guess. +/// +/// Essentially the main goal is to shift the initial calculation and distribution +/// of joiner points and data to one central location that will only do it +/// *once* upon receiving the data --- as opposed to doing it on canvas draw, +/// which will be a costly process. +/// +/// This will also handle the *cleaning* of stale data. That should be done +/// in some manner (timer on another thread, some loop) that will occasionally +/// call the purging function. Failure to do so *will* result in a growing +/// memory usage and higher CPU usage - you will be trying to process more and +/// more points as this is used! 
use std::time::Instant; use std::vec::Vec; @@ -15,8 +27,9 @@ pub struct TimedData { pub cpu_data: Vec, pub mem_data: JoinedDataPoints, pub swap_data: JoinedDataPoints, - pub temp_data: JoinedDataPoints, - pub io_data: JoinedDataPoints, + // Unused for now + // pub io_data : JoinedDataPoints + // pub temp_data: JoinedDataPoints, } /// AppCollection represents the pooled data stored within the main app @@ -36,6 +49,12 @@ pub struct DataCollection { pub memory_harvest: mem::MemHarvest, pub swap_harvest: mem::MemHarvest, pub cpu_harvest: cpu::CPUHarvest, + pub process_harvest: processes::ProcessHarvest, + pub disk_harvest: Vec, + pub io_harvest: disks::IOHarvest, + pub io_labels: Vec<(u64, u64)>, + io_prev: Vec<(u64, u64)>, + pub temp_harvest: Vec, } impl Default for DataCollection { @@ -47,12 +66,20 @@ impl Default for DataCollection { memory_harvest: mem::MemHarvest::default(), swap_harvest: mem::MemHarvest::default(), cpu_harvest: cpu::CPUHarvest::default(), + process_harvest: processes::ProcessHarvest::default(), + disk_harvest: Vec::default(), + io_harvest: disks::IOHarvest::default(), + io_labels: Vec::default(), + io_prev: Vec::default(), + temp_harvest: Vec::default(), } } } impl DataCollection { - pub fn clean_data(&mut self) {} + pub fn clean_data(&mut self) { + // TODO: [OPT] To implement to clean + } pub fn eat_data(&mut self, harvested_data: &Data) { let harvested_time = harvested_data.last_collection_time; @@ -67,6 +94,14 @@ impl DataCollection { // CPU self.eat_cpu(&harvested_data, &harvested_time, &mut new_entry); + // Temp + self.eat_temp(&harvested_data, &harvested_time, &mut new_entry); + + // Disks + self.eat_disks(&harvested_data, &harvested_time, &mut new_entry); + + // Processes + // And we're done eating. 
self.current_instant = harvested_time; self.timed_data_vec.push((harvested_time, new_entry)); @@ -147,7 +182,7 @@ impl DataCollection { // Note this only pre-calculates the data points - the names will be // within the local copy of cpu_harvest. Since it's all sequential // it probably doesn't matter anyways. - for (itx, cpu) in harvested_data.cpu.cpu_vec.iter().enumerate() { + for (itx, cpu) in harvested_data.cpu.iter().enumerate() { let cpu_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() { generate_joining_points( &time, @@ -165,6 +200,51 @@ impl DataCollection { self.cpu_harvest = harvested_data.cpu.clone(); } + + fn eat_temp( + &mut self, harvested_data: &Data, _harvested_time: &Instant, _new_entry: &mut TimedData, + ) { + // TODO: [PO] To implement + self.temp_harvest = harvested_data.temperature_sensors.clone(); + } + + fn eat_disks( + &mut self, harvested_data: &Data, harvested_time: &Instant, _new_entry: &mut TimedData, + ) { + // TODO: [PO] To implement + + let time_since_last_harvest = harvested_time + .duration_since(self.current_instant) + .as_secs_f64(); + + for (itx, device) in harvested_data.disks.iter().enumerate() { + if let Some(trim) = device.name.split('/').last() { + let io_device = harvested_data.io.get(trim); + if let Some(io) = io_device { + let io_r_pt = io.read_bytes; + let io_w_pt = io.write_bytes; + + if self.io_labels.len() <= itx { + self.io_prev.push((io_r_pt, io_w_pt)); + self.io_labels.push((0, 0)); + } else { + let r_rate = ((io_r_pt - self.io_prev[itx].0) as f64 + / time_since_last_harvest) + .round() as u64; + let w_rate = ((io_w_pt - self.io_prev[itx].1) as f64 + / time_since_last_harvest) + .round() as u64; + + self.io_labels[itx] = (r_rate, w_rate); + self.io_prev[itx] = (io_r_pt, io_w_pt); + } + } + } + } + + self.disk_harvest = harvested_data.disks.clone(); + self.io_harvest = harvested_data.io.clone(); + } } pub fn generate_joining_points( diff --git a/src/app/data_harvester.rs 
b/src/app/data_harvester.rs index 4127f09f..fc468f7e 100644 --- a/src/app/data_harvester.rs +++ b/src/app/data_harvester.rs @@ -17,23 +17,17 @@ fn set_if_valid(result: &Result, value_to_set: &mut T) } } -fn push_if_valid(result: &Result, vector_to_push: &mut Vec) { - if let Ok(result) = result { - vector_to_push.push(result.clone()); - } -} - #[derive(Clone, Debug)] pub struct Data { pub cpu: cpu::CPUHarvest, - pub list_of_io: Vec, pub memory: mem::MemHarvest, pub swap: mem::MemHarvest, - pub list_of_temperature_sensor: Vec, + pub temperature_sensors: Vec, pub network: network::NetworkHarvest, - pub list_of_processes: Vec, - pub grouped_list_of_processes: Option>, - pub list_of_disks: Vec, + pub list_of_processes: Vec, + pub grouped_list_of_processes: Option>, + pub disks: Vec, + pub io: disks::IOHarvest, pub last_collection_time: Instant, } @@ -41,13 +35,13 @@ impl Default for Data { fn default() -> Self { Data { cpu: cpu::CPUHarvest::default(), - list_of_io: Vec::default(), memory: mem::MemHarvest::default(), swap: mem::MemHarvest::default(), - list_of_temperature_sensor: Vec::default(), + temperature_sensors: Vec::default(), list_of_processes: Vec::default(), grouped_list_of_processes: None, - list_of_disks: Vec::default(), + disks: Vec::default(), + io: disks::IOHarvest::default(), network: network::NetworkHarvest::default(), last_collection_time: Instant::now(), } @@ -56,11 +50,11 @@ impl Default for Data { impl Data { pub fn first_run_cleanup(&mut self) { - self.list_of_io = Vec::new(); - self.list_of_temperature_sensor = Vec::new(); + self.io = disks::IOHarvest::default(); + self.temperature_sensors = Vec::new(); self.list_of_processes = Vec::new(); self.grouped_list_of_processes = None; - self.list_of_disks = Vec::new(); + self.disks = Vec::new(); self.network.first_run_cleanup(); self.memory = mem::MemHarvest::default(); @@ -149,6 +143,20 @@ impl DataState { // CPU self.data.cpu = cpu::get_cpu_data_list(&self.sys); + // Disks + if let Ok(disks) = 
disks::get_disk_usage_list().await { + self.data.disks = disks; + } + if let Ok(io) = disks::get_io_usage_list(false).await { + self.data.io = io; + } + + // Temp + if let Ok(temp) = temperature::get_temperature_data(&self.sys, &self.temperature_type).await + { + self.data.temperature_sensors = temp; + } + // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! set_if_valid( &processes::get_sorted_processes_list( @@ -163,19 +171,7 @@ impl DataState { &mut self.data.list_of_processes, ); - set_if_valid( - &disks::get_disk_usage_list().await, - &mut self.data.list_of_disks, - ); - push_if_valid( - &disks::get_io_usage_list(false).await, - &mut self.data.list_of_io, - ); - set_if_valid( - &temperature::get_temperature_data(&self.sys, &self.temperature_type).await, - &mut self.data.list_of_temperature_sensor, - ); - + // Update time self.data.last_collection_time = current_instant; // Filter out stale timed entries @@ -192,15 +188,6 @@ impl DataState { for stale in stale_list { self.prev_pid_stats.remove(&stale); } - self.data.list_of_io = self - .data - .list_of_io - .iter() - .cloned() - .filter(|entry| { - clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds - }) - .collect::>(); self.last_clean = clean_instant; } diff --git a/src/app/data_harvester/cpu.rs b/src/app/data_harvester/cpu.rs index c3276827..136a36dd 100644 --- a/src/app/data_harvester/cpu.rs +++ b/src/app/data_harvester/cpu.rs @@ -6,10 +6,7 @@ pub struct CPUData { pub cpu_usage: f64, } -#[derive(Default, Debug, Clone)] -pub struct CPUHarvest { - pub cpu_vec: Vec, -} +pub type CPUHarvest = Vec; pub fn get_cpu_data_list(sys: &System) -> CPUHarvest { let cpu_data = sys.get_processor_list(); @@ -22,5 +19,5 @@ pub fn get_cpu_data_list(sys: &System) -> CPUHarvest { }); } - CPUHarvest { cpu_vec } + cpu_vec } diff --git a/src/app/data_harvester/disks.rs b/src/app/data_harvester/disks.rs index bf564ab2..7a7f52e2 100644 --- 
a/src/app/data_harvester/disks.rs +++ b/src/app/data_harvester/disks.rs @@ -1,11 +1,10 @@ use futures::stream::StreamExt; use heim::units::information; -use std::time::Instant; #[derive(Debug, Clone, Default)] -pub struct DiskData { - pub name: Box, - pub mount_point: Box, +pub struct DiskHarvest { + pub name: String, + pub mount_point: String, pub free_space: u64, pub used_space: u64, pub total_space: u64, @@ -13,18 +12,13 @@ pub struct DiskData { #[derive(Clone, Debug)] pub struct IOData { - pub mount_point: Box, pub read_bytes: u64, pub write_bytes: u64, } -#[derive(Debug, Clone)] -pub struct IOPackage { - pub io_hash: std::collections::HashMap, - pub instant: Instant, -} +pub type IOHarvest = std::collections::HashMap; -pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result { +pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result { let mut io_hash: std::collections::HashMap = std::collections::HashMap::new(); if get_physical { let mut physical_counter_stream = heim::disk::io_counters_physical(); @@ -34,7 +28,6 @@ pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Resul io_hash.insert( mount_point.to_string(), IOData { - mount_point: Box::from(mount_point), read_bytes: io.read_bytes().get::(), write_bytes: io.write_bytes().get::(), }, @@ -48,7 +41,6 @@ pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Resul io_hash.insert( mount_point.to_string(), IOData { - mount_point: Box::from(mount_point), read_bytes: io.read_bytes().get::(), write_bytes: io.write_bytes().get::(), }, @@ -56,14 +48,11 @@ pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Resul } } - Ok(IOPackage { - io_hash, - instant: Instant::now(), - }) + Ok(io_hash) } -pub async fn get_disk_usage_list() -> crate::utils::error::Result> { - let mut vec_disks: Vec = Vec::new(); +pub async fn get_disk_usage_list() -> crate::utils::error::Result> { + let mut vec_disks: Vec = 
Vec::new(); let mut partitions_stream = heim::disk::partitions_physical(); while let Some(part) = partitions_stream.next().await { @@ -71,23 +60,21 @@ pub async fn get_disk_usage_list() -> crate::utils::error::Result> let partition = part; let usage = heim::disk::usage(partition.mount_point().to_path_buf()).await?; - vec_disks.push(DiskData { + vec_disks.push(DiskHarvest { free_space: usage.free().get::(), used_space: usage.used().get::(), total_space: usage.total().get::(), - mount_point: Box::from( - partition - .mount_point() - .to_str() - .unwrap_or("Name Unavailable"), - ), - name: Box::from( - partition - .device() - .unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable")) - .to_str() - .unwrap_or("Name Unavailable"), - ), + mount_point: (partition + .mount_point() + .to_str() + .unwrap_or("Name Unavailable")) + .to_string(), + name: (partition + .device() + .unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable")) + .to_str() + .unwrap_or("Name Unavailable")) + .to_string(), }); } } diff --git a/src/app/data_harvester/processes.rs b/src/app/data_harvester/processes.rs index 25d3a6d4..c72c6d01 100644 --- a/src/app/data_harvester/processes.rs +++ b/src/app/data_harvester/processes.rs @@ -18,7 +18,7 @@ impl Default for ProcessSorting { } #[derive(Debug, Clone, Default)] -pub struct ProcessData { +pub struct ProcessHarvest { pub pid: u32, pub cpu_usage_percent: f64, pub mem_usage_percent: f64, @@ -182,9 +182,9 @@ fn convert_ps( process: &str, cpu_usage: f64, cpu_percentage: f64, prev_pid_stats: &mut HashMap, use_current_cpu_total: bool, curr_time: &Instant, -) -> std::io::Result { +) -> std::io::Result { if process.trim().to_string().is_empty() { - return Ok(ProcessData { + return Ok(ProcessHarvest { pid: 0, name: "".to_string(), mem_usage_percent: 0.0, @@ -205,7 +205,7 @@ fn convert_ps( .parse::() .unwrap_or(0_f64); - Ok(ProcessData { + Ok(ProcessHarvest { pid, name, mem_usage_percent, @@ -225,8 +225,8 @@ pub fn get_sorted_processes_list( sys: &System, 
prev_idle: &mut f64, prev_non_idle: &mut f64, prev_pid_stats: &mut std::collections::HashMap, use_current_cpu_total: bool, mem_total_kb: u64, curr_time: &Instant, -) -> crate::utils::error::Result> { - let mut process_vector: Vec = Vec::new(); +) -> crate::utils::error::Result> { + let mut process_vector: Vec = Vec::new(); if cfg!(target_os = "linux") { // Linux specific - this is a massive pain... ugh. @@ -282,7 +282,7 @@ pub fn get_sorted_processes_list( process_val.name().to_string() }; - process_vector.push(ProcessData { + process_vector.push(ProcessHarvest { pid: process_val.pid() as u32, name, mem_usage_percent: process_val.memory() as f64 * 100.0 / mem_total_kb as f64, @@ -296,7 +296,7 @@ pub fn get_sorted_processes_list( } pub fn sort_processes( - process_vector: &mut Vec, sorting_method: &ProcessSorting, reverse_order: bool, + process_vector: &mut Vec, sorting_method: &ProcessSorting, reverse_order: bool, ) { // Always sort alphabetically first! process_vector.sort_by(|a, b| get_ordering(&a.name, &b.name, false)); diff --git a/src/app/data_harvester/temperature.rs b/src/app/data_harvester/temperature.rs index 1bee113e..77ffa4d6 100644 --- a/src/app/data_harvester/temperature.rs +++ b/src/app/data_harvester/temperature.rs @@ -3,9 +3,9 @@ use heim::units::thermodynamic_temperature; use std::cmp::Ordering; use sysinfo::{ComponentExt, System, SystemExt}; -#[derive(Debug, Clone)] -pub struct TempData { - pub component_name: Box, +#[derive(Default, Debug, Clone)] +pub struct TempHarvest { + pub component_name: String, pub temperature: f32, } @@ -24,15 +24,15 @@ impl Default for TemperatureType { pub async fn get_temperature_data( sys: &System, temp_type: &TemperatureType, -) -> crate::utils::error::Result> { - let mut temperature_vec: Vec = Vec::new(); +) -> crate::utils::error::Result> { + let mut temperature_vec: Vec = Vec::new(); if cfg!(target_os = "linux") { let mut sensor_data = heim::sensors::temperatures(); while let Some(sensor) = 
sensor_data.next().await { if let Ok(sensor) = sensor { - temperature_vec.push(TempData { - component_name: Box::from(sensor.unit()), + temperature_vec.push(TempHarvest { + component_name: sensor.unit().to_string(), temperature: match temp_type { TemperatureType::Celsius => sensor .current() @@ -52,8 +52,8 @@ pub async fn get_temperature_data( } else { let sensor_data = sys.get_components_list(); for component in sensor_data { - temperature_vec.push(TempData { - component_name: Box::from(component.get_label()), + temperature_vec.push(TempHarvest { + component_name: component.get_label().to_string(), temperature: match temp_type { TemperatureType::Celsius => component.get_temperature(), TemperatureType::Kelvin => component.get_temperature() + 273.15, diff --git a/src/canvas.rs b/src/canvas.rs index 1cb7e4b6..2c3ce977 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -1,6 +1,6 @@ use crate::{ app, constants, - data_conversion::{ConvertedCpuData, ConvertedProcessData}, + data_conversion::{ConvertedCpuData, ConvertedProcessHarvest}, utils::{error, gen_util::*}, }; use std::cmp::max; @@ -94,8 +94,8 @@ pub struct CanvasData { pub network_data_tx: Vec<(f64, f64)>, pub disk_data: Vec>, pub temp_sensor_data: Vec>, - pub process_data: Vec, - pub grouped_process_data: Vec, + pub process_data: Vec, + pub grouped_process_data: Vec, pub mem_label: String, pub swap_label: String, pub mem_data: Vec<(f64, f64)>, @@ -951,7 +951,7 @@ fn draw_search_field( fn draw_processes_table( f: &mut Frame, app_state: &mut app::App, draw_loc: Rect, ) { - let process_data: &[ConvertedProcessData] = if app_state.is_grouped() { + let process_data: &[ConvertedProcessHarvest] = if app_state.is_grouped() { &app_state.canvas_data.grouped_process_data } else { &app_state.canvas_data.process_data @@ -971,7 +971,7 @@ fn draw_processes_table( app_state.currently_selected_process_position, ); - let sliced_vec: Vec = (&process_data[start_position as usize..]).to_vec(); + let sliced_vec: Vec = 
(&process_data[start_position as usize..]).to_vec(); let mut process_counter = 0; // Draw! diff --git a/src/constants.rs b/src/constants.rs index 1dad85a9..1b93ea1b 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -1,7 +1,7 @@ // TODO: Store like three minutes of data, then change how much is shown based on scaling! pub const STALE_MAX_MILLISECONDS: u64 = 180 * 1000; // We wish to store at most 60 seconds worth of data. This may change in the future, or be configurable. pub const TIME_STARTS_FROM: u64 = 60 * 1000; -pub const TICK_RATE_IN_MILLISECONDS: u64 = 200; // We use this as it's a good value to work with. +pub const TICK_RATE_IN_MILLISECONDS: u64 = 200; // How fast the screen refreshes pub const DEFAULT_REFRESH_RATE_IN_MILLISECONDS: u128 = 1000; pub const MAX_KEY_TIMEOUT_IN_MILLISECONDS: u128 = 1000; pub const NUM_COLOURS: i32 = 256; diff --git a/src/data_conversion.rs b/src/data_conversion.rs index 1b5dd0ee..a01842f2 100644 --- a/src/data_conversion.rs +++ b/src/data_conversion.rs @@ -2,8 +2,9 @@ //! can actually handle. 
use crate::{ + app::data_farmer, app::data_harvester, - app::data_janitor, + app::App, constants, utils::gen_util::{get_exact_byte_values, get_simple_byte_values}, }; @@ -21,7 +22,7 @@ pub struct ConvertedNetworkData { } #[derive(Clone, Default, Debug)] -pub struct ConvertedProcessData { +pub struct ConvertedProcessHarvest { pub pid: u32, pub name: String, pub cpu_usage: String, @@ -55,15 +56,16 @@ impl From<&CpuPoint> for (f64, f64) { } } -pub fn update_temp_row( - app_data: &data_harvester::Data, temp_type: &data_harvester::temperature::TemperatureType, -) -> Vec> { +pub fn update_temp_row(app: &App) -> Vec> { let mut sensor_vector: Vec> = Vec::new(); - if (&app_data.list_of_temperature_sensor).is_empty() { + let current_data = &app.data_collection; + let temp_type = &app.temperature_type; + + if current_data.temp_harvest.is_empty() { sensor_vector.push(vec!["No Sensors Found".to_string(), "".to_string()]) } else { - for sensor in &app_data.list_of_temperature_sensor { + for sensor in ¤t_data.temp_harvest { sensor_vector.push(vec![ sensor.component_name.to_string(), (sensor.temperature.ceil() as u64).to_string() @@ -79,44 +81,18 @@ pub fn update_temp_row( sensor_vector } -pub fn update_disk_row(app_data: &data_harvester::Data) -> Vec> { +pub fn update_disk_row(current_data: &data_farmer::DataCollection) -> Vec> { let mut disk_vector: Vec> = Vec::new(); - for disk in &app_data.list_of_disks { - let io_activity = { - let mut final_result = ("0B/s".to_string(), "0B/s".to_string()); - if app_data.list_of_io.len() > 2 { - if let Some(io_package) = &app_data.list_of_io.last() { - if let Some(trimmed_mount) = disk.name.to_string().split('/').last() { - let prev_io_package = &app_data.list_of_io[app_data.list_of_io.len() - 2]; - - let io_hashmap = &io_package.io_hash; - let prev_io_hashmap = &prev_io_package.io_hash; - let time_difference = io_package - .instant - .duration_since(prev_io_package.instant) - .as_secs_f64(); - if io_hashmap.contains_key(trimmed_mount) - && 
prev_io_hashmap.contains_key(trimmed_mount) - { - // Ideally change this... - let ele = &io_hashmap[trimmed_mount]; - let prev = &prev_io_hashmap[trimmed_mount]; - let read_bytes_per_sec = ((ele.read_bytes - prev.read_bytes) as f64 - / time_difference) as u64; - let write_bytes_per_sec = ((ele.write_bytes - prev.write_bytes) as f64 - / time_difference) as u64; - let converted_read = get_simple_byte_values(read_bytes_per_sec, false); - let converted_write = - get_simple_byte_values(write_bytes_per_sec, false); - final_result = ( - format!("{:.*}{}/s", 0, converted_read.0, converted_read.1), - format!("{:.*}{}/s", 0, converted_write.0, converted_write.1), - ); - } - } - } - } - final_result + for (itx, disk) in current_data.disk_harvest.iter().enumerate() { + let io_activity = if current_data.io_labels.len() > itx { + let converted_read = get_simple_byte_values(current_data.io_labels[itx].0, false); + let converted_write = get_simple_byte_values(current_data.io_labels[itx].1, false); + ( + format!("{:.*}{}/s", 0, converted_read.0, converted_read.1), + format!("{:.*}{}/s", 0, converted_write.0, converted_write.1), + ) + } else { + ("0B/s".to_string(), "0B/s".to_string()) }; let converted_free_space = get_simple_byte_values(disk.free_space, false); @@ -143,8 +119,8 @@ pub fn update_disk_row(app_data: &data_harvester::Data) -> Vec> { pub fn simple_update_process_row( app_data: &data_harvester::Data, matching_string: &str, use_pid: bool, -) -> (Vec, Vec) { - let process_vector: Vec = app_data +) -> (Vec, Vec) { + let process_vector: Vec = app_data .list_of_processes .iter() .filter(|process| { @@ -161,7 +137,7 @@ pub fn simple_update_process_row( .map(|process| return_mapped_process(process)) .collect::>(); - let mut grouped_process_vector: Vec = Vec::new(); + let mut grouped_process_vector: Vec = Vec::new(); if let Some(grouped_list_of_processes) = &app_data.grouped_list_of_processes { grouped_process_vector = grouped_list_of_processes .iter() @@ -186,8 +162,8 @@ pub fn 
simple_update_process_row( pub fn regex_update_process_row( app_data: &data_harvester::Data, regex_matcher: &std::result::Result, use_pid: bool, -) -> (Vec, Vec) { - let process_vector: Vec = app_data +) -> (Vec, Vec) { + let process_vector: Vec = app_data .list_of_processes .iter() .filter(|process| { @@ -204,7 +180,7 @@ pub fn regex_update_process_row( .map(|process| return_mapped_process(process)) .collect::>(); - let mut grouped_process_vector: Vec = Vec::new(); + let mut grouped_process_vector: Vec = Vec::new(); if let Some(grouped_list_of_processes) = &app_data.grouped_list_of_processes { grouped_process_vector = grouped_list_of_processes .iter() @@ -226,8 +202,10 @@ pub fn regex_update_process_row( (process_vector, grouped_process_vector) } -fn return_mapped_process(process: &data_harvester::processes::ProcessData) -> ConvertedProcessData { - ConvertedProcessData { +fn return_mapped_process( + process: &data_harvester::processes::ProcessHarvest, +) -> ConvertedProcessHarvest { + ConvertedProcessHarvest { pid: process.pid, name: process.name.to_string(), cpu_usage: format!("{:.1}%", process.cpu_usage_percent), @@ -237,7 +215,7 @@ fn return_mapped_process(process: &data_harvester::processes::ProcessData) -> Co } pub fn update_cpu_data_points( - show_avg_cpu: bool, current_data: &data_janitor::DataCollection, + show_avg_cpu: bool, current_data: &data_farmer::DataCollection, ) -> Vec { let mut cpu_data_vector: Vec = Vec::new(); let current_time = current_data.current_instant; @@ -260,9 +238,7 @@ pub fn update_cpu_data_points( cpu_data_vector[itx_offset].cpu_name = if show_avg_cpu && itx_offset == 0 { "AVG".to_string() } else { - current_data.cpu_harvest.cpu_vec[itx] - .cpu_name - .to_uppercase() + current_data.cpu_harvest[itx].cpu_name.to_uppercase() }; } @@ -285,7 +261,7 @@ pub fn update_cpu_data_points( cpu_data_vector } -pub fn update_mem_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> { +pub fn update_mem_data_points(current_data: 
&data_farmer::DataCollection) -> Vec<(f64, f64)> { let mut result: Vec<(f64, f64)> = Vec::new(); let current_time = current_data.current_instant; @@ -306,7 +282,7 @@ pub fn update_mem_data_points(current_data: &data_janitor::DataCollection) -> Ve result } -pub fn update_swap_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> { +pub fn update_swap_data_points(current_data: &data_farmer::DataCollection) -> Vec<(f64, f64)> { let mut result: Vec<(f64, f64)> = Vec::new(); let current_time = current_data.current_instant; @@ -327,7 +303,7 @@ pub fn update_swap_data_points(current_data: &data_janitor::DataCollection) -> V result } -pub fn update_mem_labels(current_data: &data_janitor::DataCollection) -> (String, String) { +pub fn update_mem_labels(current_data: &data_farmer::DataCollection) -> (String, String) { let mem_label = if current_data.memory_harvest.mem_total_in_mb == 0 { "".to_string() } else { @@ -360,13 +336,11 @@ pub fn update_mem_labels(current_data: &data_janitor::DataCollection) -> (String ) }; - debug!("{:?}", mem_label); - (mem_label, swap_label) } pub fn convert_network_data_points( - current_data: &data_janitor::DataCollection, + current_data: &data_farmer::DataCollection, ) -> ConvertedNetworkData { let mut rx: Vec<(f64, f64)> = Vec::new(); let mut tx: Vec<(f64, f64)> = Vec::new(); diff --git a/src/main.rs b/src/main.rs index 2e40c3cc..07a4d0a9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -36,7 +36,7 @@ mod constants; mod data_conversion; use app::data_harvester; -use app::data_harvester::processes::ProcessData; +use app::data_harvester::processes::ProcessHarvest; use constants::TICK_RATE_IN_MILLISECONDS; use data_conversion::*; use std::collections::BTreeMap; @@ -203,13 +203,14 @@ fn main() -> error::Result<()> { } loop { + // TODO: [OPT] this should not block... let's properly use tick rates and non-blocking, okay? 
if let Ok(recv) = rx.recv_timeout(Duration::from_millis(TICK_RATE_IN_MILLISECONDS)) { match recv { Event::KeyInput(event) => { if event.modifiers.is_empty() { // If only a code, and no modifiers, don't bother... - // Required to catch for while typing + // Required catch for searching - otherwise you couldn't search with q. if event.code == KeyCode::Char('q') && !app.is_in_search_widget() { break; } @@ -233,7 +234,7 @@ fn main() -> error::Result<()> { if let KeyModifiers::CONTROL = event.modifiers { match event.code { KeyCode::Char('c') => break, - KeyCode::Char('f') => app.toggle_searching(), // Note that this is fine for now, assuming '/' does not do anything other than search. + KeyCode::Char('f') => app.enable_searching(), KeyCode::Left | KeyCode::Char('h') => app.move_left(), KeyCode::Right | KeyCode::Char('l') => app.move_right(), KeyCode::Up | KeyCode::Char('k') => app.move_up(), @@ -245,6 +246,7 @@ fn main() -> error::Result<()> { app.reset(); } } + // TODO: [SEARCH] Rename "simple" search to just... search without cases... 
KeyCode::Char('s') => app.toggle_simple_search(), KeyCode::Char('a') => app.skip_cursor_beginning(), KeyCode::Char('e') => app.skip_cursor_end(), @@ -280,11 +282,10 @@ fn main() -> error::Result<()> { app.canvas_data.total_tx_display = network_data.total_tx_display; // Disk - app.canvas_data.disk_data = update_disk_row(&app.data); + app.canvas_data.disk_data = update_disk_row(&app.data_collection); // Temperatures - app.canvas_data.temp_sensor_data = - update_temp_row(&app.data, &app.temperature_type); + app.canvas_data.temp_sensor_data = update_temp_row(&app); // Memory app.canvas_data.mem_data = update_mem_data_points(&app.data_collection); app.canvas_data.swap_data = update_swap_data_points(&app.data_collection); @@ -349,7 +350,7 @@ fn handle_process_sorting(app: &mut app::App) { process_map .iter() .map(|(name, data)| { - ProcessData { + ProcessHarvest { pid: 0, // Irrelevant cpu_usage_percent: data.0, mem_usage_percent: data.1, From 3d4de7867ce30a68b866ca8b0c37f64f673129d8 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 28 Jan 2020 22:06:06 -0500 Subject: [PATCH 15/26] Added cleaning function. 
--- src/app/data_farmer.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/app/data_farmer.rs b/src/app/data_farmer.rs index 0acb7343..44d69f62 100644 --- a/src/app/data_farmer.rs +++ b/src/app/data_farmer.rs @@ -77,8 +77,19 @@ impl Default for DataCollection { } impl DataCollection { - pub fn clean_data(&mut self) { - // TODO: [OPT] To implement to clean + pub fn clean_data(&mut self, max_time_millis: u128) { + let current_time = Instant::now(); + + let mut remove_index = 0; + for entry in &self.timed_data_vec { + if current_time.duration_since(entry.0).as_millis() > max_time_millis { + remove_index += 1; + } else { + break; + } + } + + self.timed_data_vec.drain(0..remove_index); } pub fn eat_data(&mut self, harvested_data: &Data) { From 63299afaf0b9d68d44abb3ca24de128bee272824 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 28 Jan 2020 22:11:00 -0500 Subject: [PATCH 16/26] Added cleaning event loop --- src/app/data_harvester.rs | 24 +----------------------- src/constants.rs | 2 +- src/main.rs | 15 +++++++++++++++ 3 files changed, 17 insertions(+), 24 deletions(-) diff --git a/src/app/data_harvester.rs b/src/app/data_harvester.rs index fc468f7e..b40380c9 100644 --- a/src/app/data_harvester.rs +++ b/src/app/data_harvester.rs @@ -1,6 +1,6 @@ //! This is the main file to house data collection functions. 
-use crate::{constants, utils::error::Result}; +use crate::utils::error::Result; use std::{collections::HashMap, time::Instant}; use sysinfo::{System, SystemExt}; @@ -66,13 +66,11 @@ impl Data { pub struct DataState { pub data: Data, sys: System, - stale_max_seconds: u64, prev_pid_stats: HashMap, prev_idle: f64, prev_non_idle: f64, mem_total_kb: u64, temperature_type: temperature::TemperatureType, - last_clean: Instant, // Last time stale data was cleared use_current_cpu_total: bool, } @@ -81,13 +79,11 @@ impl Default for DataState { DataState { data: Data::default(), sys: System::new(), - stale_max_seconds: constants::STALE_MAX_MILLISECONDS / 1000, prev_pid_stats: HashMap::new(), prev_idle: 0_f64, prev_non_idle: 0_f64, mem_total_kb: 0, temperature_type: temperature::TemperatureType::Celsius, - last_clean: Instant::now(), use_current_cpu_total: false, } } @@ -173,23 +169,5 @@ impl DataState { // Update time self.data.last_collection_time = current_instant; - - // Filter out stale timed entries - let clean_instant = Instant::now(); - if clean_instant.duration_since(self.last_clean).as_secs() > self.stale_max_seconds { - let stale_list: Vec<_> = self - .prev_pid_stats - .iter() - .filter(|&(_, &v)| { - clean_instant.duration_since(v.1).as_secs() > self.stale_max_seconds - }) - .map(|(k, _)| k.clone()) - .collect(); - for stale in stale_list { - self.prev_pid_stats.remove(&stale); - } - - self.last_clean = clean_instant; - } } } diff --git a/src/constants.rs b/src/constants.rs index 1b93ea1b..a0811da7 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -1,5 +1,5 @@ // TODO: Store like three minutes of data, then change how much is shown based on scaling! -pub const STALE_MAX_MILLISECONDS: u64 = 180 * 1000; // We wish to store at most 60 seconds worth of data. This may change in the future, or be configurable. +pub const STALE_MAX_MILLISECONDS: u128 = 180 * 1000; // We wish to store at most 180 seconds worth of data. 
This may change in the future, or be configurable. pub const TIME_STARTS_FROM: u64 = 60 * 1000; pub const TICK_RATE_IN_MILLISECONDS: u64 = 200; // How fast the screen refreshes pub const DEFAULT_REFRESH_RATE_IN_MILLISECONDS: u128 = 1000; diff --git a/src/main.rs b/src/main.rs index 07a4d0a9..76f4563f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -46,6 +46,7 @@ enum Event { KeyInput(I), MouseInput(J), Update(Box), + Clean, } enum ResetEvent { @@ -175,6 +176,16 @@ fn main() -> error::Result<()> { }); } + // Cleaning loop + { + let tx = tx.clone(); + thread::spawn(move || loop { + thread::sleep(Duration::from_millis( + constants::STALE_MAX_MILLISECONDS as u64, + )); + tx.send(Event::Clean).unwrap(); + }); + } // Event loop let (rtx, rrx) = mpsc::channel(); { @@ -301,6 +312,10 @@ fn main() -> error::Result<()> { handle_process_sorting(&mut app); } } + Event::Clean => { + app.data_collection + .clean_data(constants::STALE_MAX_MILLISECONDS); + } } } From c415dfd88c6fd3113a25a165dced68d47eca596f Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 28 Jan 2020 22:21:31 -0500 Subject: [PATCH 17/26] Changed wording for case match default in flags --- README.md | 2 +- src/main.rs | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index ef841837..6867617d 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,7 @@ Run using `btm`. - `-g`, `--group` will group together processes with the same name by default (equivalent to pressing `Tab`). -- `-s`, `--simple_search` will default the search to simple search rather than regex search. +- `-i`, `--case_insensitive` will default to not matching cases when searching processes. 
### Keybindings diff --git a/src/main.rs b/src/main.rs index 76f4563f..49a9cd10 100644 --- a/src/main.rs +++ b/src/main.rs @@ -72,9 +72,9 @@ fn main() -> error::Result<()> { (@arg LEFT_LEGEND: -l --left_legend "Puts external chart legends on the left side rather than the default right side.") (@arg USE_CURR_USAGE: -u --current_usage "Within Linux, sets a process' CPU usage to be based on the total current CPU usage, rather than assuming 100% usage.") //(@arg CONFIG_LOCATION: -co --config +takes_value "Sets the location of the config file. Expects a config file in the JSON format.") - (@arg BASIC_MODE: -b --basic "Sets bottom to basic mode, not showing graphs and only showing basic tables.") + //(@arg BASIC_MODE: -b --basic "Sets bottom to basic mode, not showing graphs and only showing basic tables.") (@arg GROUP_PROCESSES: -g --group "Groups processes with the same name together on launch.") - (@arg SEARCH_DEFAULT_USE_SIMPLE: -s --simple_search "Uses a simple case-insensitive string comparison to search processes by default.") + (@arg CASE_INSENSITIVE_DEFAULT: -i --case_insensitive "Do not match case when searching processes by default.") ) .get_matches(); @@ -132,7 +132,7 @@ fn main() -> error::Result<()> { } // Set default search method - if matches.is_present("SEARCH_DEFAULT_USE_SIMPLE") { + if matches.is_present("CASE_INSENSITIVE_DEFAULT") { app.use_simple = true; } From f32035b8d50b7f0aacaa16a3168de6e27c7f1c5f Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 28 Jan 2020 22:38:36 -0500 Subject: [PATCH 18/26] Tweaked the chart ratios --- src/canvas.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/canvas.rs b/src/canvas.rs index 2c3ce977..caf1bbb6 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -277,8 +277,8 @@ pub fn draw_data( .margin(1) .constraints( [ - Constraint::Percentage(33), - Constraint::Percentage(34), + Constraint::Percentage(30), + Constraint::Percentage(36), Constraint::Percentage(34), ] .as_ref(), @@ -971,7 
+971,8 @@ fn draw_processes_table( app_state.currently_selected_process_position, ); - let sliced_vec: Vec = (&process_data[start_position as usize..]).to_vec(); + let sliced_vec: Vec = + (&process_data[start_position as usize..]).to_vec(); let mut process_counter = 0; // Draw! From 2d25abaa16b935b54b582a4b5a34dd68d758ad23 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 28 Jan 2020 22:42:14 -0500 Subject: [PATCH 19/26] Removed redundant logic in cpu chart causing average to show the wrong colour --- src/canvas.rs | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/src/canvas.rs b/src/canvas.rs index caf1bbb6..6011c14b 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -401,18 +401,8 @@ fn draw_cpu_graph(f: &mut Frame, app_state: &app::App, d let mut cpu_entries_vec: Vec<(Style, Vec<(f64, f64)>)> = Vec::new(); for (i, cpu) in cpu_data.iter().enumerate() { - let mut avg_cpu_exist_offset = 0; - if app_state.show_average_cpu { - if i == 0 { - // Skip, we want to render the average cpu last! 
- continue; - } else { - avg_cpu_exist_offset = 1; - } - } - cpu_entries_vec.push(( - Style::default().fg(COLOUR_LIST[(i - avg_cpu_exist_offset) % COLOUR_LIST.len()]), + Style::default().fg(COLOUR_LIST[(i) % COLOUR_LIST.len()]), cpu.cpu_data .iter() .map(<(f64, f64)>::from) @@ -423,7 +413,7 @@ fn draw_cpu_graph(f: &mut Frame, app_state: &app::App, d if app_state.show_average_cpu { if let Some(avg_cpu_entry) = cpu_data.first() { cpu_entries_vec.push(( - Style::default().fg(COLOUR_LIST[(cpu_data.len() - 1) % COLOUR_LIST.len()]), + Style::default().fg(COLOUR_LIST[0]), avg_cpu_entry .cpu_data .iter() From 4f31c6ee02e7f31c84f894d9e83106d7b039108e Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Tue, 28 Jan 2020 22:48:29 -0500 Subject: [PATCH 20/26] Lowered stale timing again --- src/app/data_farmer.rs | 2 +- src/constants.rs | 3 +-- src/main.rs | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/app/data_farmer.rs b/src/app/data_farmer.rs index 44d69f62..369845e3 100644 --- a/src/app/data_farmer.rs +++ b/src/app/data_farmer.rs @@ -82,7 +82,7 @@ impl DataCollection { let mut remove_index = 0; for entry in &self.timed_data_vec { - if current_time.duration_since(entry.0).as_millis() > max_time_millis { + if current_time.duration_since(entry.0).as_millis() >= max_time_millis { remove_index += 1; } else { break; diff --git a/src/constants.rs b/src/constants.rs index a0811da7..bd2edc9d 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -1,5 +1,4 @@ -// TODO: Store like three minutes of data, then change how much is shown based on scaling! -pub const STALE_MAX_MILLISECONDS: u128 = 180 * 1000; // We wish to store at most 180 seconds worth of data. This may change in the future, or be configurable. 
+pub const STALE_MAX_MILLISECONDS: u128 = 60 * 1000; // How long to store data pub const TIME_STARTS_FROM: u64 = 60 * 1000; pub const TICK_RATE_IN_MILLISECONDS: u64 = 200; // How fast the screen refreshes pub const DEFAULT_REFRESH_RATE_IN_MILLISECONDS: u128 = 1000; diff --git a/src/main.rs b/src/main.rs index 49a9cd10..dde51164 100644 --- a/src/main.rs +++ b/src/main.rs @@ -181,7 +181,7 @@ fn main() -> error::Result<()> { let tx = tx.clone(); thread::spawn(move || loop { thread::sleep(Duration::from_millis( - constants::STALE_MAX_MILLISECONDS as u64, + constants::STALE_MAX_MILLISECONDS as u64 + 5000, )); tx.send(Event::Clean).unwrap(); }); From 971384cf3a0b30c1e3456a508fb475c6ef606f5a Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Fri, 31 Jan 2020 20:49:30 -0500 Subject: [PATCH 21/26] New way of doing referencing previous pid stats without having to GC - just write a new one every time... --- README.md | 2 +- src/app/data_farmer.rs | 2 +- src/app/data_harvester/processes.rs | 27 ++++++++++++++++----------- 3 files changed, 18 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 6867617d..0732cb5e 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,7 @@ Run using `btm`. - `-g`, `--group` will group together processes with the same name by default (equivalent to pressing `Tab`). -- `-i`, `--case_insensitive` will default to not matching cases when searching processes. +- `-i`, `--case_insensitive` will default to not matching case when searching processes. 
### Keybindings diff --git a/src/app/data_farmer.rs b/src/app/data_farmer.rs index 369845e3..02b725cc 100644 --- a/src/app/data_farmer.rs +++ b/src/app/data_farmer.rs @@ -276,7 +276,7 @@ pub fn generate_joining_points( 500, ); - for itx in (0..num_points).step_by(1) { + for itx in 0..num_points { points.push(( time_difference - (itx as f64 / num_points as f64 * time_difference), start_y + (itx as f64 / num_points as f64 * value_difference), diff --git a/src/app/data_harvester/processes.rs b/src/app/data_harvester/processes.rs index c72c6d01..8f24b94d 100644 --- a/src/app/data_harvester/processes.rs +++ b/src/app/data_harvester/processes.rs @@ -144,12 +144,13 @@ fn get_process_cpu_stats(pid: u32) -> std::io::Result { /// Note that cpu_percentage should be represented WITHOUT the \times 100 factor! fn linux_cpu_usage( pid: u32, cpu_usage: f64, cpu_percentage: f64, - previous_pid_stats: &mut HashMap, use_current_cpu_total: bool, + prev_pid_stats: &HashMap, + new_pid_stats: &mut HashMap, use_current_cpu_total: bool, curr_time: &Instant, ) -> std::io::Result { // Based heavily on https://stackoverflow.com/a/23376195 and https://stackoverflow.com/a/1424556 - let before_proc_val: f64 = if previous_pid_stats.contains_key(&pid.to_string()) { - previous_pid_stats + let before_proc_val: f64 = if prev_pid_stats.contains_key(&pid.to_string()) { + prev_pid_stats .get(&pid.to_string()) .unwrap_or(&(0_f64, *curr_time)) .0 @@ -167,10 +168,7 @@ fn linux_cpu_usage( (after_proc_val - before_proc_val) / cpu_usage * 100_f64 );*/ - let entry = previous_pid_stats - .entry(pid.to_string()) - .or_insert((after_proc_val, *curr_time)); - *entry = (after_proc_val, *curr_time); + new_pid_stats.insert(pid.to_string(), (after_proc_val, *curr_time)); if use_current_cpu_total { Ok((after_proc_val - before_proc_val) / cpu_usage * 100_f64) } else { @@ -180,7 +178,8 @@ fn linux_cpu_usage( fn convert_ps( process: &str, cpu_usage: f64, cpu_percentage: f64, - prev_pid_stats: &mut HashMap, 
use_current_cpu_total: bool, + prev_pid_stats: &HashMap, + new_pid_stats: &mut HashMap, use_current_cpu_total: bool, curr_time: &Instant, ) -> std::io::Result { if process.trim().to_string().is_empty() { @@ -214,6 +213,7 @@ fn convert_ps( cpu_usage, cpu_percentage, prev_pid_stats, + new_pid_stats, use_current_cpu_total, curr_time, )?, @@ -223,8 +223,8 @@ fn convert_ps( pub fn get_sorted_processes_list( sys: &System, prev_idle: &mut f64, prev_non_idle: &mut f64, - prev_pid_stats: &mut std::collections::HashMap, - use_current_cpu_total: bool, mem_total_kb: u64, curr_time: &Instant, + prev_pid_stats: &mut HashMap, use_current_cpu_total: bool, + mem_total_kb: u64, curr_time: &Instant, ) -> crate::utils::error::Result> { let mut process_vector: Vec = Vec::new(); @@ -240,12 +240,15 @@ pub fn get_sorted_processes_list( if let Ok((cpu_usage, cpu_percentage)) = cpu_calc { let process_stream = split_string.collect::>(); + let mut new_pid_stats: HashMap = HashMap::new(); + for process in process_stream { if let Ok(process_object) = convert_ps( process, cpu_usage, cpu_percentage, - prev_pid_stats, + &prev_pid_stats, + &mut new_pid_stats, use_current_cpu_total, curr_time, ) { @@ -254,6 +257,8 @@ pub fn get_sorted_processes_list( } } } + + *prev_pid_stats = new_pid_stats; } else { error!("Unable to properly parse CPU data in Linux."); error!("Result: {:?}", cpu_calc.err()); From 67ff2f28eb1c60f022f64305bb25e2971fb1e716 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Fri, 31 Jan 2020 21:01:33 -0500 Subject: [PATCH 22/26] Tweaked point generation a bit again --- src/app/data_farmer.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/app/data_farmer.rs b/src/app/data_farmer.rs index 02b725cc..f370aefc 100644 --- a/src/app/data_farmer.rs +++ b/src/app/data_farmer.rs @@ -264,13 +264,18 @@ pub fn generate_joining_points( let mut points: Vec<(TimeOffset, Value)> = Vec::new(); // Convert time floats first: - let time_difference = 
(*end_x).duration_since(*start_x).as_millis() as f64; + let tmp_time_diff = (*end_x).duration_since(*start_x).as_millis() as f64; + let time_difference = if tmp_time_diff == 0.0 { + 0.001 + } else { + tmp_time_diff + }; let value_difference = end_y - start_y; // Let's generate... about this many points! let num_points = std::cmp::min( std::cmp::max( - (value_difference.abs() / (time_difference + 0.0001) * 500.0) as u64, + (value_difference.abs() / time_difference * 500.0) as u64, 100, ), 500, From 0bf7f32473a86bd4bb029b5dd61a3cb1d19a31ca Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sat, 1 Feb 2020 23:49:44 -0500 Subject: [PATCH 23/26] Optimizing processes... --- README.md | 4 +- src/app.rs | 186 +++++++++++++--------------- src/app/data_farmer.rs | 23 ++-- src/app/data_harvester.rs | 3 - src/app/data_harvester/processes.rs | 56 --------- src/canvas.rs | 100 +++++++-------- src/data_conversion.rs | 157 ++++++++--------------- src/main.rs | 158 ++++++++++++----------- src/utils/gen_util.rs | 26 ++++ 9 files changed, 305 insertions(+), 408 deletions(-) diff --git a/README.md b/README.md index 0732cb5e..2b877a6f 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,9 @@ Run using `btm`. - `-g`, `--group` will group together processes with the same name by default (equivalent to pressing `Tab`). -- `-i`, `--case_insensitive` will default to not matching case when searching processes. +- `-i`, `--case_insensitive` will default to not matching case + +when searching processes. 
### Keybindings diff --git a/src/app.rs b/src/app.rs index 5b398c93..c1682728 100644 --- a/src/app.rs +++ b/src/app.rs @@ -5,12 +5,12 @@ use std::time::Instant; pub mod data_farmer; use data_farmer::*; -use crate::{canvas, constants, data_conversion::ConvertedProcessHarvest, utils::error::Result}; +use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::error::Result}; mod process_killer; #[derive(Clone, Copy)] -pub enum ApplicationPosition { +pub enum WidgetPosition { Cpu, Mem, Disk, @@ -68,7 +68,7 @@ pub struct App { pub temperature_type: temperature::TemperatureType, pub update_rate_in_milliseconds: u64, pub show_average_cpu: bool, - pub current_application_position: ApplicationPosition, + pub current_widget_selected: WidgetPosition, pub data: data_harvester::Data, awaiting_second_char: bool, second_char: char, @@ -76,12 +76,12 @@ pub struct App { pub show_help: bool, pub show_dd: bool, pub dd_err: Option, - to_delete_process_list: Option>, + to_delete_process_list: Option>, pub is_frozen: bool, pub left_legend: bool, pub use_current_cpu_total: bool, last_key_press: Instant, - pub canvas_data: canvas::CanvasData, + pub canvas_data: canvas::DisplayableData, enable_grouping: bool, enable_searching: bool, current_search_query: String, @@ -105,7 +105,7 @@ impl App { temperature_type, update_rate_in_milliseconds, show_average_cpu, - current_application_position: ApplicationPosition::Process, + current_widget_selected: WidgetPosition::Process, scroll_direction: ScrollDirection::DOWN, currently_selected_process_position: 0, currently_selected_disk_position: 0, @@ -127,7 +127,7 @@ impl App { left_legend, use_current_cpu_total, last_key_press: Instant::now(), - canvas_data: canvas::CanvasData::default(), + canvas_data: canvas::DisplayableData::default(), enable_grouping: false, enable_searching: false, current_search_query: String::default(), @@ -144,7 +144,7 @@ impl App { self.show_help = false; self.show_dd = false; if self.enable_searching { - 
self.current_application_position = ApplicationPosition::Process; + self.current_widget_selected = WidgetPosition::Process; self.enable_searching = false; } self.current_search_query = String::new(); @@ -161,7 +161,7 @@ impl App { self.to_delete_process_list = None; self.dd_err = None; } else if self.enable_searching { - self.current_application_position = ApplicationPosition::Process; + self.current_widget_selected = WidgetPosition::Process; self.enable_searching = false; } } @@ -178,16 +178,17 @@ impl App { pub fn toggle_grouping(&mut self) { // Disallow usage whilst in a dialog and only in processes if !self.is_in_dialog() { - if let ApplicationPosition::Process = self.current_application_position { + if let WidgetPosition::Process = self.current_widget_selected { self.enable_grouping = !(self.enable_grouping); + self.update_process_gui = true; } } } pub fn on_tab(&mut self) { - match self.current_application_position { - ApplicationPosition::Process => self.toggle_grouping(), - ApplicationPosition::Disk => {} + match self.current_widget_selected { + WidgetPosition::Process => self.toggle_grouping(), + WidgetPosition::Disk => {} _ => {} } } @@ -198,11 +199,11 @@ impl App { pub fn enable_searching(&mut self) { if !self.is_in_dialog() { - match self.current_application_position { - ApplicationPosition::Process | ApplicationPosition::ProcessSearch => { + match self.current_widget_selected { + WidgetPosition::Process | WidgetPosition::ProcessSearch => { // Toggle on self.enable_searching = true; - self.current_application_position = ApplicationPosition::ProcessSearch; + self.current_widget_selected = WidgetPosition::ProcessSearch; } _ => {} } @@ -214,7 +215,7 @@ impl App { } pub fn is_in_search_widget(&self) -> bool { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { true } else { false @@ -223,7 +224,7 @@ impl App { pub fn search_with_pid(&mut self) { if 
!self.is_in_dialog() && self.is_searching() { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { self.searching_pid = true; } } @@ -231,7 +232,7 @@ impl App { pub fn search_with_name(&mut self) { if !self.is_in_dialog() && self.is_searching() { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { self.searching_pid = false; } } @@ -247,7 +248,7 @@ impl App { pub fn toggle_simple_search(&mut self) { if !self.is_in_dialog() && self.is_searching() { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { self.use_simple = !self.use_simple; // Update to latest (when simple is on this is not updated) @@ -287,7 +288,7 @@ impl App { } pub fn on_backspace(&mut self) { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { if self.current_cursor_position > 0 { self.current_cursor_position -= 1; self.current_search_query @@ -311,7 +312,7 @@ impl App { pub fn on_up_key(&mut self) { if !self.is_in_dialog() { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { } else { self.decrement_position_count(); } @@ -320,7 +321,7 @@ impl App { pub fn on_down_key(&mut self) { if !self.is_in_dialog() { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { } else { self.increment_position_count(); } @@ -329,7 +330,7 @@ impl App { pub fn on_left_key(&mut self) { if !self.is_in_dialog() { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let 
WidgetPosition::ProcessSearch = self.current_widget_selected { if self.current_cursor_position > 0 { self.current_cursor_position -= 1; } @@ -339,7 +340,7 @@ impl App { pub fn on_right_key(&mut self) { if !self.is_in_dialog() { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { if self.current_cursor_position < self.current_search_query.len() { self.current_cursor_position += 1; } @@ -349,7 +350,7 @@ impl App { pub fn skip_cursor_beginning(&mut self) { if !self.is_in_dialog() { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { self.current_cursor_position = 0; } } @@ -357,7 +358,7 @@ impl App { pub fn skip_cursor_end(&mut self) { if !self.is_in_dialog() { - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { self.current_cursor_position = self.current_search_query.len(); } } @@ -375,7 +376,7 @@ impl App { } self.last_key_press = current_key_press_inst; - if let ApplicationPosition::ProcessSearch = self.current_application_position { + if let WidgetPosition::ProcessSearch = self.current_widget_selected { self.current_search_query .insert(self.current_cursor_position, caught_char); self.current_cursor_position += 1; @@ -394,31 +395,14 @@ impl App { self.enable_searching(); } 'd' => { - if let ApplicationPosition::Process = self.current_application_position { + if let WidgetPosition::Process = self.current_widget_selected { if self.awaiting_second_char && self.second_char == 'd' { self.awaiting_second_char = false; self.second_char = ' '; - let current_process = if self.is_grouped() { - let mut res: Vec = Vec::new(); - for pid in &self.canvas_data.grouped_process_data - [self.currently_selected_process_position as usize] - .group - { - let result = self - 
.canvas_data - .process_data - .iter() - .find(|p| p.pid == *pid); - if let Some(process) = result { - res.push((*process).clone()); - } - } - res - } else { - vec![self.canvas_data.process_data - [self.currently_selected_process_position as usize] - .clone()] - }; + let current_process = Vec::new(); + + // TODO: FIX THIS SHITTTTTT + self.to_delete_process_list = Some(current_process); self.show_dd = true; self.reset_multi_tap_keys(); @@ -513,7 +497,7 @@ impl App { pub fn kill_highlighted_process(&mut self) -> Result<()> { // Technically unnecessary but this is a good check... - if let ApplicationPosition::Process = self.current_application_position { + if let WidgetPosition::Process = self.current_widget_selected { if let Some(current_selected_processes) = &(self.to_delete_process_list) { for current_selected_process in current_selected_processes { process_killer::kill_process_given_pid(current_selected_process.pid)?; @@ -524,7 +508,7 @@ impl App { Ok(()) } - pub fn get_current_highlighted_process_list(&self) -> Option> { + pub fn get_current_highlighted_process_list(&self) -> Option> { self.to_delete_process_list.clone() } @@ -540,12 +524,12 @@ impl App { // PROC_SEARCH -(up)> Disk, -(down)> PROC, -(left)> Network pub fn move_left(&mut self) { if !self.is_in_dialog() { - self.current_application_position = match self.current_application_position { - ApplicationPosition::Process => ApplicationPosition::Network, - ApplicationPosition::ProcessSearch => ApplicationPosition::Network, - ApplicationPosition::Disk => ApplicationPosition::Mem, - ApplicationPosition::Temp => ApplicationPosition::Mem, - _ => self.current_application_position, + self.current_widget_selected = match self.current_widget_selected { + WidgetPosition::Process => WidgetPosition::Network, + WidgetPosition::ProcessSearch => WidgetPosition::Network, + WidgetPosition::Disk => WidgetPosition::Mem, + WidgetPosition::Temp => WidgetPosition::Mem, + _ => self.current_widget_selected, }; 
self.reset_multi_tap_keys(); } @@ -553,10 +537,10 @@ impl App { pub fn move_right(&mut self) { if !self.is_in_dialog() { - self.current_application_position = match self.current_application_position { - ApplicationPosition::Mem => ApplicationPosition::Temp, - ApplicationPosition::Network => ApplicationPosition::Process, - _ => self.current_application_position, + self.current_widget_selected = match self.current_widget_selected { + WidgetPosition::Mem => WidgetPosition::Temp, + WidgetPosition::Network => WidgetPosition::Process, + _ => self.current_widget_selected, }; self.reset_multi_tap_keys(); } @@ -564,20 +548,20 @@ impl App { pub fn move_up(&mut self) { if !self.is_in_dialog() { - self.current_application_position = match self.current_application_position { - ApplicationPosition::Mem => ApplicationPosition::Cpu, - ApplicationPosition::Network => ApplicationPosition::Mem, - ApplicationPosition::Process => { + self.current_widget_selected = match self.current_widget_selected { + WidgetPosition::Mem => WidgetPosition::Cpu, + WidgetPosition::Network => WidgetPosition::Mem, + WidgetPosition::Process => { if self.is_searching() { - ApplicationPosition::ProcessSearch + WidgetPosition::ProcessSearch } else { - ApplicationPosition::Disk + WidgetPosition::Disk } } - ApplicationPosition::ProcessSearch => ApplicationPosition::Disk, - ApplicationPosition::Temp => ApplicationPosition::Cpu, - ApplicationPosition::Disk => ApplicationPosition::Temp, - _ => self.current_application_position, + WidgetPosition::ProcessSearch => WidgetPosition::Disk, + WidgetPosition::Temp => WidgetPosition::Cpu, + WidgetPosition::Disk => WidgetPosition::Temp, + _ => self.current_widget_selected, }; self.reset_multi_tap_keys(); } @@ -585,19 +569,19 @@ impl App { pub fn move_down(&mut self) { if !self.is_in_dialog() { - self.current_application_position = match self.current_application_position { - ApplicationPosition::Cpu => ApplicationPosition::Mem, - ApplicationPosition::Mem => 
ApplicationPosition::Network, - ApplicationPosition::Temp => ApplicationPosition::Disk, - ApplicationPosition::Disk => { + self.current_widget_selected = match self.current_widget_selected { + WidgetPosition::Cpu => WidgetPosition::Mem, + WidgetPosition::Mem => WidgetPosition::Network, + WidgetPosition::Temp => WidgetPosition::Disk, + WidgetPosition::Disk => { if self.is_searching() { - ApplicationPosition::ProcessSearch + WidgetPosition::ProcessSearch } else { - ApplicationPosition::Process + WidgetPosition::Process } } - ApplicationPosition::ProcessSearch => ApplicationPosition::Process, - _ => self.current_application_position, + WidgetPosition::ProcessSearch => WidgetPosition::Process, + _ => self.current_widget_selected, }; self.reset_multi_tap_keys(); } @@ -605,11 +589,11 @@ impl App { pub fn skip_to_first(&mut self) { if !self.is_in_dialog() { - match self.current_application_position { - ApplicationPosition::Process => self.currently_selected_process_position = 0, - ApplicationPosition::Temp => self.currently_selected_temperature_position = 0, - ApplicationPosition::Disk => self.currently_selected_disk_position = 0, - ApplicationPosition::Cpu => self.currently_selected_cpu_table_position = 0, + match self.current_widget_selected { + WidgetPosition::Process => self.currently_selected_process_position = 0, + WidgetPosition::Temp => self.currently_selected_temperature_position = 0, + WidgetPosition::Disk => self.currently_selected_disk_position = 0, + WidgetPosition::Cpu => self.currently_selected_cpu_table_position = 0, _ => {} } @@ -620,19 +604,19 @@ impl App { pub fn skip_to_last(&mut self) { if !self.is_in_dialog() { - match self.current_application_position { - ApplicationPosition::Process => { + match self.current_widget_selected { + WidgetPosition::Process => { self.currently_selected_process_position = self.data.list_of_processes.len() as i64 - 1 } - ApplicationPosition::Temp => { + WidgetPosition::Temp => { self.currently_selected_temperature_position 
= self.data.temperature_sensors.len() as i64 - 1 } - ApplicationPosition::Disk => { + WidgetPosition::Disk => { self.currently_selected_disk_position = self.data.disks.len() as i64 - 1 } - ApplicationPosition::Cpu => { + WidgetPosition::Cpu => { self.currently_selected_cpu_table_position = self.canvas_data.cpu_data.len() as i64 - 1; } @@ -645,11 +629,11 @@ impl App { pub fn decrement_position_count(&mut self) { if !self.is_in_dialog() { - match self.current_application_position { - ApplicationPosition::Process => self.change_process_position(-1), - ApplicationPosition::Temp => self.change_temp_position(-1), - ApplicationPosition::Disk => self.change_disk_position(-1), - ApplicationPosition::Cpu => self.change_cpu_table_position(-1), // TODO: Temporary, may change if we add scaling + match self.current_widget_selected { + WidgetPosition::Process => self.change_process_position(-1), + WidgetPosition::Temp => self.change_temp_position(-1), + WidgetPosition::Disk => self.change_disk_position(-1), + WidgetPosition::Cpu => self.change_cpu_table_position(-1), // TODO: Temporary, may change if we add scaling _ => {} } self.scroll_direction = ScrollDirection::UP; @@ -659,11 +643,11 @@ impl App { pub fn increment_position_count(&mut self) { if !self.is_in_dialog() { - match self.current_application_position { - ApplicationPosition::Process => self.change_process_position(1), - ApplicationPosition::Temp => self.change_temp_position(1), - ApplicationPosition::Disk => self.change_disk_position(1), - ApplicationPosition::Cpu => self.change_cpu_table_position(1), // TODO: Temporary, may change if we add scaling + match self.current_widget_selected { + WidgetPosition::Process => self.change_process_position(1), + WidgetPosition::Temp => self.change_temp_position(1), + WidgetPosition::Disk => self.change_disk_position(1), + WidgetPosition::Cpu => self.change_cpu_table_position(1), // TODO: Temporary, may change if we add scaling _ => {} } self.scroll_direction = 
ScrollDirection::DOWN; diff --git a/src/app/data_farmer.rs b/src/app/data_farmer.rs index f370aefc..4793a109 100644 --- a/src/app/data_farmer.rs +++ b/src/app/data_farmer.rs @@ -49,7 +49,7 @@ pub struct DataCollection { pub memory_harvest: mem::MemHarvest, pub swap_harvest: mem::MemHarvest, pub cpu_harvest: cpu::CPUHarvest, - pub process_harvest: processes::ProcessHarvest, + pub process_harvest: Vec, pub disk_harvest: Vec, pub io_harvest: disks::IOHarvest, pub io_labels: Vec<(u64, u64)>, @@ -66,7 +66,7 @@ impl Default for DataCollection { memory_harvest: mem::MemHarvest::default(), swap_harvest: mem::MemHarvest::default(), cpu_harvest: cpu::CPUHarvest::default(), - process_harvest: processes::ProcessHarvest::default(), + process_harvest: Vec::default(), disk_harvest: Vec::default(), io_harvest: disks::IOHarvest::default(), io_labels: Vec::default(), @@ -106,14 +106,15 @@ impl DataCollection { self.eat_cpu(&harvested_data, &harvested_time, &mut new_entry); // Temp - self.eat_temp(&harvested_data, &harvested_time, &mut new_entry); + self.eat_temp(&harvested_data); // Disks - self.eat_disks(&harvested_data, &harvested_time, &mut new_entry); + self.eat_disks(&harvested_data, &harvested_time); // Processes + self.eat_proc(&harvested_data); - // And we're done eating. + // And we're done eating. Update time and push the new entry! 
self.current_instant = harvested_time; self.timed_data_vec.push((harvested_time, new_entry)); } @@ -212,16 +213,12 @@ impl DataCollection { self.cpu_harvest = harvested_data.cpu.clone(); } - fn eat_temp( - &mut self, harvested_data: &Data, _harvested_time: &Instant, _new_entry: &mut TimedData, - ) { + fn eat_temp(&mut self, harvested_data: &Data) { // TODO: [PO] To implement self.temp_harvest = harvested_data.temperature_sensors.clone(); } - fn eat_disks( - &mut self, harvested_data: &Data, harvested_time: &Instant, _new_entry: &mut TimedData, - ) { + fn eat_disks(&mut self, harvested_data: &Data, harvested_time: &Instant) { // TODO: [PO] To implement let time_since_last_harvest = harvested_time @@ -256,6 +253,10 @@ impl DataCollection { self.disk_harvest = harvested_data.disks.clone(); self.io_harvest = harvested_data.io.clone(); } + + fn eat_proc(&mut self, harvested_data: &Data) { + self.process_harvest = harvested_data.list_of_processes.clone(); + } } pub fn generate_joining_points( diff --git a/src/app/data_harvester.rs b/src/app/data_harvester.rs index b40380c9..9ad176b5 100644 --- a/src/app/data_harvester.rs +++ b/src/app/data_harvester.rs @@ -25,7 +25,6 @@ pub struct Data { pub temperature_sensors: Vec, pub network: network::NetworkHarvest, pub list_of_processes: Vec, - pub grouped_list_of_processes: Option>, pub disks: Vec, pub io: disks::IOHarvest, pub last_collection_time: Instant, @@ -39,7 +38,6 @@ impl Default for Data { swap: mem::MemHarvest::default(), temperature_sensors: Vec::default(), list_of_processes: Vec::default(), - grouped_list_of_processes: None, disks: Vec::default(), io: disks::IOHarvest::default(), network: network::NetworkHarvest::default(), @@ -53,7 +51,6 @@ impl Data { self.io = disks::IOHarvest::default(); self.temperature_sensors = Vec::new(); self.list_of_processes = Vec::new(); - self.grouped_list_of_processes = None; self.disks = Vec::new(); self.network.first_run_cleanup(); diff --git a/src/app/data_harvester/processes.rs 
b/src/app/data_harvester/processes.rs index 8f24b94d..0b8371fe 100644 --- a/src/app/data_harvester/processes.rs +++ b/src/app/data_harvester/processes.rs @@ -1,5 +1,4 @@ use crate::utils::error; -use std::cmp::Ordering; use std::{collections::HashMap, process::Command, time::Instant}; use sysinfo::{ProcessExt, System, SystemExt}; @@ -23,7 +22,6 @@ pub struct ProcessHarvest { pub cpu_usage_percent: f64, pub mem_usage_percent: f64, pub name: String, - pub pid_vec: Option>, } fn cpu_usage_calculation( @@ -100,31 +98,6 @@ fn cpu_usage_calculation( Ok((result, cpu_percentage)) } -fn get_ordering( - a_val: T, b_val: T, reverse_order: bool, -) -> std::cmp::Ordering { - match a_val.partial_cmp(&b_val) { - Some(x) => match x { - Ordering::Greater => { - if reverse_order { - std::cmp::Ordering::Less - } else { - std::cmp::Ordering::Greater - } - } - Ordering::Less => { - if reverse_order { - std::cmp::Ordering::Greater - } else { - std::cmp::Ordering::Less - } - } - Ordering::Equal => Ordering::Equal, - }, - None => Ordering::Equal, - } -} - fn get_process_cpu_stats(pid: u32) -> std::io::Result { let mut path = std::path::PathBuf::new(); path.push("/proc"); @@ -188,7 +161,6 @@ fn convert_ps( name: "".to_string(), mem_usage_percent: 0.0, cpu_usage_percent: 0.0, - pid_vec: None, }); } @@ -217,7 +189,6 @@ fn convert_ps( use_current_cpu_total, curr_time, )?, - pid_vec: None, }) } @@ -292,36 +263,9 @@ pub fn get_sorted_processes_list( name, mem_usage_percent: process_val.memory() as f64 * 100.0 / mem_total_kb as f64, cpu_usage_percent: f64::from(process_val.cpu_usage()), - pid_vec: None, }); } } Ok(process_vector) } - -pub fn sort_processes( - process_vector: &mut Vec, sorting_method: &ProcessSorting, reverse_order: bool, -) { - // Always sort alphabetically first! 
- process_vector.sort_by(|a, b| get_ordering(&a.name, &b.name, false)); - - match sorting_method { - ProcessSorting::CPU => { - process_vector.sort_by(|a, b| { - get_ordering(a.cpu_usage_percent, b.cpu_usage_percent, reverse_order) - }); - } - ProcessSorting::MEM => { - process_vector.sort_by(|a, b| { - get_ordering(a.mem_usage_percent, b.mem_usage_percent, reverse_order) - }); - } - ProcessSorting::PID => { - process_vector.sort_by(|a, b| get_ordering(a.pid, b.pid, reverse_order)); - } - ProcessSorting::NAME => { - process_vector.sort_by(|a, b| get_ordering(&a.name, &b.name, reverse_order)) - } - } -} diff --git a/src/canvas.rs b/src/canvas.rs index 6011c14b..093e6815 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -1,9 +1,11 @@ use crate::{ - app, constants, - data_conversion::{ConvertedCpuData, ConvertedProcessHarvest}, + app::{self, data_harvester::processes::ProcessHarvest}, + constants, + data_conversion::{ConvertedCpuData, ConvertedProcessData}, utils::{error, gen_util::*}, }; use std::cmp::max; +use std::collections::HashMap; use tui::{ backend, layout::{Alignment, Constraint, Direction, Layout, Rect}, @@ -85,7 +87,7 @@ lazy_static! 
{ } #[derive(Default)] -pub struct CanvasData { +pub struct DisplayableData { pub rx_display: String, pub tx_display: String, pub total_rx_display: String, @@ -94,8 +96,9 @@ pub struct CanvasData { pub network_data_tx: Vec<(f64, f64)>, pub disk_data: Vec>, pub temp_sensor_data: Vec>, - pub process_data: Vec, - pub grouped_process_data: Vec, + pub process_data: HashMap, // Not final + pub grouped_process_data: Vec, // Not final + pub finalized_process_data: Vec, // What's actually displayed pub mem_label: String, pub swap_label: String, pub mem_data: Vec<(f64, f64)>, @@ -441,8 +444,8 @@ fn draw_cpu_graph(f: &mut Frame, app_state: &app::App, d Block::default() .title("CPU") .borders(Borders::ALL) - .border_style(match app_state.current_application_position { - app::ApplicationPosition::Cpu => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + .border_style(match app_state.current_widget_selected { + app::WidgetPosition::Cpu => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => *CANVAS_BORDER_STYLE, }), ) @@ -485,8 +488,8 @@ fn draw_cpu_legend( .map(|(itx, cpu_string_row)| { Row::StyledData( cpu_string_row.iter(), - match app_state.current_application_position { - app::ApplicationPosition::Cpu => { + match app_state.current_widget_selected { + app::WidgetPosition::Cpu => { if cpu_row_counter == app_state.currently_selected_cpu_table_position - start_position { @@ -515,8 +518,8 @@ fn draw_cpu_legend( // Draw Table::new(CPU_LEGEND_HEADER.iter(), cpu_rows) .block(Block::default().borders(Borders::ALL).border_style( - match app_state.current_application_position { - app::ApplicationPosition::Cpu => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + match app_state.current_widget_selected { + app::WidgetPosition::Cpu => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => *CANVAS_BORDER_STYLE, }, )) @@ -554,8 +557,8 @@ fn draw_memory_table( // Draw Table::new(MEM_HEADERS.iter(), mapped_mem_rows) .block(Block::default().borders(Borders::ALL).border_style( - match app_state.current_application_position { - 
app::ApplicationPosition::Mem => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + match app_state.current_widget_selected { + app::WidgetPosition::Mem => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => *CANVAS_BORDER_STYLE, }, )) @@ -576,9 +579,11 @@ fn draw_memory_graph(f: &mut Frame, app_state: &app::App let x_axis: Axis = Axis::default() .style(Style::default().fg(GRAPH_COLOUR)) .bounds([0.0, constants::TIME_STARTS_FROM as f64]); - let y_axis = Axis::default() + + // Offset as the zero value isn't drawn otherwise... + let y_axis: Axis<&str> = Axis::default() .style(Style::default().fg(GRAPH_COLOUR)) - .bounds([-0.5, 100.5]) // Offset as the zero value isn't drawn otherwise... + .bounds([-0.5, 100.5]) .labels(&["0%", "100%"]); let mut mem_canvas_vec: Vec = vec![Dataset::default() @@ -610,8 +615,8 @@ fn draw_memory_graph(f: &mut Frame, app_state: &app::App Block::default() .title("Memory") .borders(Borders::ALL) - .border_style(match app_state.current_application_position { - app::ApplicationPosition::Mem => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + .border_style(match app_state.current_widget_selected { + app::WidgetPosition::Mem => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => *CANVAS_BORDER_STYLE, }), ) @@ -637,8 +642,8 @@ fn draw_network_graph(f: &mut Frame, app_state: &app::Ap Block::default() .title("Network") .borders(Borders::ALL) - .border_style(match app_state.current_application_position { - app::ApplicationPosition::Network => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + .border_style(match app_state.current_widget_selected { + app::WidgetPosition::Network => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => *CANVAS_BORDER_STYLE, }), ) @@ -715,8 +720,8 @@ fn draw_network_labels( mapped_network, ) .block(Block::default().borders(Borders::ALL).border_style( - match app_state.current_application_position { - app::ApplicationPosition::Network => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + match app_state.current_widget_selected { + app::WidgetPosition::Network => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => 
*CANVAS_BORDER_STYLE, }, )) @@ -749,8 +754,8 @@ fn draw_temp_table( let temperature_rows = sliced_vec.iter().map(|temp_row| { Row::StyledData( temp_row.iter(), - match app_state.current_application_position { - app::ApplicationPosition::Temp => { + match app_state.current_widget_selected { + app::WidgetPosition::Temp => { if temp_row_counter == app_state.currently_selected_temperature_position - start_position { @@ -782,8 +787,8 @@ fn draw_temp_table( Block::default() .title("Temperatures") .borders(Borders::ALL) - .border_style(match app_state.current_application_position { - app::ApplicationPosition::Temp => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + .border_style(match app_state.current_widget_selected { + app::WidgetPosition::Temp => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => *CANVAS_BORDER_STYLE, }), ) @@ -815,8 +820,8 @@ fn draw_disk_table( let disk_rows = sliced_vec.iter().map(|disk| { Row::StyledData( disk.iter(), - match app_state.current_application_position { - app::ApplicationPosition::Disk => { + match app_state.current_widget_selected { + app::WidgetPosition::Disk => { if disk_counter == app_state.currently_selected_disk_position - start_position { disk_counter = -1; Style::default().fg(Color::Black).bg(Color::Cyan) @@ -847,8 +852,8 @@ fn draw_disk_table( Block::default() .title("Disk") .borders(Borders::ALL) - .border_style(match app_state.current_application_position { - app::ApplicationPosition::Disk => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + .border_style(match app_state.current_widget_selected { + app::WidgetPosition::Disk => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => *CANVAS_BORDER_STYLE, }), ) @@ -881,8 +886,7 @@ fn draw_search_field( .chars() .enumerate() .map(|(itx, c)| { - if let app::ApplicationPosition::ProcessSearch = app_state.current_application_position - { + if let app::WidgetPosition::ProcessSearch = app_state.current_widget_selected { if itx == cursor_position { return Text::styled( c.to_string(), @@ -893,7 +897,7 @@ fn draw_search_field( 
Text::styled(c.to_string(), Style::default().fg(TEXT_COLOUR)) }) .collect::>(); - if let app::ApplicationPosition::ProcessSearch = app_state.current_application_position { + if let app::WidgetPosition::ProcessSearch = app_state.current_widget_selected { if cursor_position >= query.len() { query_with_cursor.push(Text::styled( " ".to_string(), @@ -926,8 +930,8 @@ fn draw_search_field( .border_style(if app_state.get_current_regex_matcher().is_err() { Style::default().fg(Color::Red) } else { - match app_state.current_application_position { - app::ApplicationPosition::ProcessSearch => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + match app_state.current_widget_selected { + app::WidgetPosition::ProcessSearch => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => *CANVAS_BORDER_STYLE, } }), @@ -941,16 +945,15 @@ fn draw_search_field( fn draw_processes_table( f: &mut Frame, app_state: &mut app::App, draw_loc: Rect, ) { - let process_data: &[ConvertedProcessHarvest] = if app_state.is_grouped() { - &app_state.canvas_data.grouped_process_data - } else { - &app_state.canvas_data.process_data - }; + let process_data: &[ConvertedProcessData] = &app_state.canvas_data.finalized_process_data; // Admittedly this is kinda a hack... but we need to: // * Scroll // * Show/hide elements based on scroll position - // As such, we use a process_counter to know when we've hit the process we've currently scrolled to. We also need to move the list - we can + // + // As such, we use a process_counter to know when we've + // hit the process we've currently scrolled to. + // We also need to move the list - we can // do so by hiding some elements! let num_rows = i64::from(draw_loc.height) - 5; @@ -961,26 +964,25 @@ fn draw_processes_table( app_state.currently_selected_process_position, ); - let sliced_vec: Vec = - (&process_data[start_position as usize..]).to_vec(); + let sliced_vec: Vec = (&process_data[start_position as usize..]).to_vec(); let mut process_counter = 0; // Draw! 
let process_rows = sliced_vec.iter().map(|process| { let stringified_process_vec: Vec = vec![ if app_state.is_grouped() { - process.group.len().to_string() + process.group_pids.len().to_string() } else { process.pid.to_string() }, process.name.clone(), - process.cpu_usage.clone(), - process.mem_usage.clone(), + format!("{:.1}%", process.cpu_usage), + format!("{:.1}%", process.mem_usage), ]; Row::StyledData( stringified_process_vec.into_iter(), - match app_state.current_application_position { - app::ApplicationPosition::Process => { + match app_state.current_widget_selected { + app::WidgetPosition::Process => { if process_counter == app_state.currently_selected_process_position - start_position { @@ -1042,8 +1044,8 @@ fn draw_processes_table( Block::default() .title("Processes") .borders(Borders::ALL) - .border_style(match app_state.current_application_position { - app::ApplicationPosition::Process => *CANVAS_HIGHLIGHTED_BORDER_STYLE, + .border_style(match app_state.current_widget_selected { + app::WidgetPosition::Process => *CANVAS_HIGHLIGHTED_BORDER_STYLE, _ => *CANVAS_BORDER_STYLE, }), ) diff --git a/src/data_conversion.rs b/src/data_conversion.rs index a01842f2..15234c11 100644 --- a/src/data_conversion.rs +++ b/src/data_conversion.rs @@ -2,14 +2,16 @@ //! can actually handle. 
use crate::{ - app::data_farmer, - app::data_harvester, - app::App, + app::{ + data_farmer, + data_harvester::{self, processes::ProcessHarvest}, + App, + }, constants, utils::gen_util::{get_exact_byte_values, get_simple_byte_values}, }; use constants::*; -use regex::Regex; +use std::collections::HashMap; #[derive(Default, Debug)] pub struct ConvertedNetworkData { @@ -22,12 +24,12 @@ pub struct ConvertedNetworkData { } #[derive(Clone, Default, Debug)] -pub struct ConvertedProcessHarvest { +pub struct ConvertedProcessData { pub pid: u32, pub name: String, - pub cpu_usage: String, - pub mem_usage: String, - pub group: Vec, + pub cpu_usage: f64, + pub mem_usage: f64, + pub group_pids: Vec, } #[derive(Clone, Default, Debug)] @@ -117,103 +119,6 @@ pub fn update_disk_row(current_data: &data_farmer::DataCollection) -> Vec (Vec, Vec) { - let process_vector: Vec = app_data - .list_of_processes - .iter() - .filter(|process| { - if use_pid { - process - .pid - .to_string() - .to_ascii_lowercase() - .contains(matching_string) - } else { - process.name.to_ascii_lowercase().contains(matching_string) - } - }) - .map(|process| return_mapped_process(process)) - .collect::>(); - - let mut grouped_process_vector: Vec = Vec::new(); - if let Some(grouped_list_of_processes) = &app_data.grouped_list_of_processes { - grouped_process_vector = grouped_list_of_processes - .iter() - .filter(|process| { - if use_pid { - process - .pid - .to_string() - .to_ascii_lowercase() - .contains(matching_string) - } else { - process.name.to_ascii_lowercase().contains(matching_string) - } - }) - .map(|process| return_mapped_process(process)) - .collect::>(); - } - - (process_vector, grouped_process_vector) -} - -pub fn regex_update_process_row( - app_data: &data_harvester::Data, regex_matcher: &std::result::Result, - use_pid: bool, -) -> (Vec, Vec) { - let process_vector: Vec = app_data - .list_of_processes - .iter() - .filter(|process| { - if let Ok(matcher) = regex_matcher { - if use_pid { - 
matcher.is_match(&process.pid.to_string()) - } else { - matcher.is_match(&process.name) - } - } else { - true - } - }) - .map(|process| return_mapped_process(process)) - .collect::>(); - - let mut grouped_process_vector: Vec = Vec::new(); - if let Some(grouped_list_of_processes) = &app_data.grouped_list_of_processes { - grouped_process_vector = grouped_list_of_processes - .iter() - .filter(|process| { - if let Ok(matcher) = regex_matcher { - if use_pid { - matcher.is_match(&process.pid.to_string()) - } else { - matcher.is_match(&process.name) - } - } else { - true - } - }) - .map(|process| return_mapped_process(process)) - .collect::>(); - } - - (process_vector, grouped_process_vector) -} - -fn return_mapped_process( - process: &data_harvester::processes::ProcessHarvest, -) -> ConvertedProcessHarvest { - ConvertedProcessHarvest { - pid: process.pid, - name: process.name.to_string(), - cpu_usage: format!("{:.1}%", process.cpu_usage_percent), - mem_usage: format!("{:.1}%", process.mem_usage_percent), - group: vec![], - } -} - pub fn update_cpu_data_points( show_avg_cpu: bool, current_data: &data_farmer::DataCollection, ) -> Vec { @@ -432,3 +337,45 @@ pub fn convert_network_data_points( total_tx_display, } } + +pub fn convert_process_data( + current_data: &data_farmer::DataCollection, +) -> (HashMap, Vec) { + let mut single_list = HashMap::new(); + + // cpu, mem, pids + let mut grouped_hashmap: HashMap)> = + std::collections::HashMap::new(); + + // Go through every single process in the list... 
and build a hashmap + single list + for process in &(current_data).process_harvest { + let entry = grouped_hashmap.entry(process.name.clone()).or_insert(( + process.pid, + 0.0, + 0.0, + Vec::new(), + )); + + (*entry).1 += process.cpu_usage_percent; + (*entry).2 += process.mem_usage_percent; + (*entry).3.push(process.pid); + + single_list.insert(process.pid, process.clone()); + } + + let grouped_list: Vec = grouped_hashmap + .iter() + .map(|(name, process_details)| { + let p = process_details.clone(); + ConvertedProcessData { + pid: p.0, + name: name.to_string(), + cpu_usage: p.1, + mem_usage: p.2, + group_pids: p.3, + } + }) + .collect::>(); + + (single_list, grouped_list) +} diff --git a/src/main.rs b/src/main.rs index dde51164..9d739ea0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -35,11 +35,9 @@ mod canvas; mod constants; mod data_conversion; -use app::data_harvester; -use app::data_harvester::processes::ProcessHarvest; +use app::data_harvester::{self, processes::ProcessSorting}; use constants::TICK_RATE_IN_MILLISECONDS; use data_conversion::*; -use std::collections::BTreeMap; use utils::error::{self, BottomError}; enum Event { @@ -267,7 +265,7 @@ fn main() -> error::Result<()> { } if app.update_process_gui { - handle_process_sorting(&mut app); + update_final_process_list(&mut app); app.update_process_gui = false; } } @@ -279,7 +277,6 @@ fn main() -> error::Result<()> { Event::Update(data) => { if !app.is_frozen { app.data_collection.eat_data(&data); - app.data = *data; // TODO: [OPT] remove this // Convert all data into tui-compliant components @@ -309,7 +306,10 @@ fn main() -> error::Result<()> { update_cpu_data_points(app.show_average_cpu, &app.data_collection); // Processes - handle_process_sorting(&mut app); + let (single, grouped) = convert_process_data(&app.data_collection); + app.canvas_data.process_data = single; + app.canvas_data.grouped_process_data = grouped; + update_final_process_list(&mut app); } } Event::Clean => { @@ -339,82 +339,6 @@ fn main() 
-> error::Result<()> { Ok(()) } -type TempProcess = (f64, f64, Vec); - -fn handle_process_sorting(app: &mut app::App) { - // Handle combining multi-pid processes to form one entry in table. - // This was done this way to save time and avoid code - // duplication... sorry future me. Really. - - // First, convert this all into a BTreeMap. The key is by name. This - // pulls double duty by allowing us to combine entries AND it sorts! - - // Fields for tuple: CPU%, MEM%, MEM_KB, PID_VEC - let mut process_map: BTreeMap = BTreeMap::new(); - for process in &app.data.list_of_processes { - let entry_val = process_map - .entry(process.name.clone()) - .or_insert((0.0, 0.0, vec![])); - entry_val.0 += process.cpu_usage_percent; - entry_val.1 += process.mem_usage_percent; - entry_val.2.push(process.pid); - } - - // Now... turn this back into the exact same vector... but now with merged processes! - app.data.grouped_list_of_processes = Some( - process_map - .iter() - .map(|(name, data)| { - ProcessHarvest { - pid: 0, // Irrelevant - cpu_usage_percent: data.0, - mem_usage_percent: data.1, - name: name.clone(), - pid_vec: Some(data.2.clone()), - } - }) - .collect::>(), - ); - - if let Some(grouped_list_of_processes) = &mut app.data.grouped_list_of_processes { - if let data_harvester::processes::ProcessSorting::PID = &app.process_sorting_type { - data_harvester::processes::sort_processes( - grouped_list_of_processes, - &data_harvester::processes::ProcessSorting::CPU, // Go back to default, negate PID for group - true, - ); - } else { - data_harvester::processes::sort_processes( - grouped_list_of_processes, - &app.process_sorting_type, - app.process_sorting_reverse, - ); - } - } - - data_harvester::processes::sort_processes( - &mut app.data.list_of_processes, - &app.process_sorting_type, - app.process_sorting_reverse, - ); - - let tuple_results = if app.use_simple { - simple_update_process_row( - &app.data, - &(app.get_current_search_query().to_ascii_lowercase()), - 
app.is_searching_with_pid(), - ) - } else { - regex_update_process_row( - &app.data, - app.get_current_regex_matcher(), - app.is_searching_with_pid(), - ) - }; - app.canvas_data.process_data = tuple_results.0; - app.canvas_data.grouped_process_data = tuple_results.1; -} - fn cleanup( terminal: &mut tui::terminal::Terminal>, ) -> error::Result<()> { @@ -425,3 +349,73 @@ fn cleanup( Ok(()) } + +fn update_final_process_list(app: &mut app::App) { + let mut filtered_process_data: Vec = if app.is_grouped() { + app.canvas_data + .grouped_process_data + .clone() + .into_iter() + .filter(|process| { + if let Ok(matcher) = app.get_current_regex_matcher() { + matcher.is_match(&process.name) + } else { + true + } + }) + .collect::>() + } else { + app.canvas_data + .process_data + .iter() + .filter(|(_pid, process)| { + if let Ok(matcher) = app.get_current_regex_matcher() { + if app.is_searching_with_pid() { + matcher.is_match(&process.pid.to_string()) + } else { + matcher.is_match(&process.name) + } + } else { + true + } + }) + .map(|(_pid, process)| ConvertedProcessData { + pid: process.pid, + name: process.name.clone(), + cpu_usage: process.cpu_usage_percent, + mem_usage: process.mem_usage_percent, + group_pids: vec![process.pid], + }) + .collect::>() + }; + + sort_process_data(&mut filtered_process_data, app); + app.canvas_data.finalized_process_data = filtered_process_data; +} + +fn sort_process_data(to_sort_vec: &mut Vec, app: &app::App) { + to_sort_vec.sort_by(|a, b| utils::gen_util::get_ordering(&a.name, &b.name, false)); + + match app.process_sorting_type { + ProcessSorting::CPU => { + to_sort_vec.sort_by(|a, b| { + utils::gen_util::get_ordering(a.cpu_usage, b.cpu_usage, app.process_sorting_reverse) + }); + } + ProcessSorting::MEM => { + to_sort_vec.sort_by(|a, b| { + utils::gen_util::get_ordering(a.mem_usage, b.mem_usage, app.process_sorting_reverse) + }); + } + ProcessSorting::NAME => to_sort_vec.sort_by(|a, b| { + utils::gen_util::get_ordering(&a.name, &b.name, 
app.process_sorting_reverse) + }), + ProcessSorting::PID => { + if !app.is_grouped() { + to_sort_vec.sort_by(|a, b| { + utils::gen_util::get_ordering(a.pid, b.pid, app.process_sorting_reverse) + }); + } + } + } +} diff --git a/src/utils/gen_util.rs b/src/utils/gen_util.rs index 73c04371..04417dab 100644 --- a/src/utils/gen_util.rs +++ b/src/utils/gen_util.rs @@ -59,3 +59,29 @@ pub fn get_simple_byte_values(bytes: u64, spacing: bool) -> (f64, String) { _ => (bytes as f64 / 1_000_000_000_000.0, "TB".to_string()), } } + +/// Gotta get partial ordering? No problem, here's something to deal with it~ +pub fn get_ordering( + a_val: T, b_val: T, reverse_order: bool, +) -> std::cmp::Ordering { + match a_val.partial_cmp(&b_val) { + Some(x) => match x { + Ordering::Greater => { + if reverse_order { + std::cmp::Ordering::Less + } else { + std::cmp::Ordering::Greater + } + } + Ordering::Less => { + if reverse_order { + std::cmp::Ordering::Greater + } else { + std::cmp::Ordering::Less + } + } + Ordering::Equal => Ordering::Equal, + }, + None => Ordering::Equal, + } +} From 0ab4b7f7cc2a57fcd75adbaccdcc1a612402dee1 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 2 Feb 2020 00:24:00 -0500 Subject: [PATCH 24/26] Fix process searching while scrolling... 
that was broken for a while, I guess --- src/app.rs | 66 ++++++++++++++++++++++----------------------------- src/canvas.rs | 15 ++++++++---- src/main.rs | 4 ++-- 3 files changed, 42 insertions(+), 43 deletions(-) diff --git a/src/app.rs b/src/app.rs index c1682728..ea33077c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -9,7 +9,7 @@ use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::err mod process_killer; -#[derive(Clone, Copy)] +#[derive(Debug, Clone, Copy)] pub enum WidgetPosition { Cpu, Mem, @@ -86,7 +86,7 @@ pub struct App { enable_searching: bool, current_search_query: String, searching_pid: bool, - pub use_simple: bool, + pub ignore_case: bool, current_regex: std::result::Result, current_cursor_position: usize, pub data_collection: DataCollection, @@ -132,7 +132,7 @@ impl App { enable_searching: false, current_search_query: String::default(), searching_pid: false, - use_simple: false, + ignore_case: false, current_regex: BASE_REGEX.clone(), //TODO: [OPT] seems like a thing we can switch to lifetimes to avoid cloning current_cursor_position: 0, data_collection: DataCollection::default(), @@ -246,26 +246,28 @@ impl App { &self.current_search_query } - pub fn toggle_simple_search(&mut self) { + pub fn toggle_ignore_case(&mut self) { if !self.is_in_dialog() && self.is_searching() { if let WidgetPosition::ProcessSearch = self.current_widget_selected { - self.use_simple = !self.use_simple; - - // Update to latest (when simple is on this is not updated) - if !self.use_simple { - self.current_regex = if self.current_search_query.is_empty() { - BASE_REGEX.clone() - } else { - regex::Regex::new(&(self.current_search_query)) - }; - } - - // Force update to process display in GUI + self.ignore_case = !self.ignore_case; + self.update_regex(); self.update_process_gui = true; } } } + fn update_regex(&mut self) { + self.current_regex = if self.current_search_query.is_empty() { + BASE_REGEX.clone() + } else if self.ignore_case { + 
regex::Regex::new(&(format!("(?i){}", self.current_search_query))) + } else { + regex::Regex::new(&(self.current_search_query)) + }; + self.previous_process_position = 0; + self.currently_selected_process_position = 0; + } + pub fn get_cursor_position(&self) -> usize { self.current_cursor_position } @@ -294,13 +296,7 @@ impl App { self.current_search_query .remove(self.current_cursor_position); - if !self.use_simple { - self.current_regex = if self.current_search_query.is_empty() { - BASE_REGEX.clone() - } else { - regex::Regex::new(&(self.current_search_query)) - }; - } + self.update_regex(); self.update_process_gui = true; } } @@ -381,13 +377,8 @@ impl App { .insert(self.current_cursor_position, caught_char); self.current_cursor_position += 1; - if !self.use_simple { - self.current_regex = if self.current_search_query.is_empty() { - BASE_REGEX.clone() - } else { - regex::Regex::new(&(self.current_search_query)) - }; - } + self.update_regex(); + self.update_process_gui = true; } else { match caught_char { @@ -401,7 +392,7 @@ impl App { self.second_char = ' '; let current_process = Vec::new(); - // TODO: FIX THIS SHITTTTTT + // TODO: Fix self.to_delete_process_list = Some(current_process); self.show_dd = true; @@ -607,14 +598,15 @@ impl App { match self.current_widget_selected { WidgetPosition::Process => { self.currently_selected_process_position = - self.data.list_of_processes.len() as i64 - 1 + self.canvas_data.finalized_process_data.len() as i64 - 1 } WidgetPosition::Temp => { self.currently_selected_temperature_position = - self.data.temperature_sensors.len() as i64 - 1 + self.canvas_data.temp_sensor_data.len() as i64 - 1 } WidgetPosition::Disk => { - self.currently_selected_disk_position = self.data.disks.len() as i64 - 1 + self.currently_selected_disk_position = + self.canvas_data.disk_data.len() as i64 - 1 } WidgetPosition::Cpu => { self.currently_selected_cpu_table_position = @@ -667,7 +659,7 @@ impl App { fn change_process_position(&mut self, 
num_to_change_by: i64) { if self.currently_selected_process_position + num_to_change_by >= 0 && self.currently_selected_process_position + num_to_change_by - < self.data.list_of_processes.len() as i64 + < self.canvas_data.finalized_process_data.len() as i64 { self.currently_selected_process_position += num_to_change_by; } @@ -676,7 +668,7 @@ impl App { fn change_temp_position(&mut self, num_to_change_by: i64) { if self.currently_selected_temperature_position + num_to_change_by >= 0 && self.currently_selected_temperature_position + num_to_change_by - < self.data.temperature_sensors.len() as i64 + < self.canvas_data.temp_sensor_data.len() as i64 { self.currently_selected_temperature_position += num_to_change_by; } @@ -685,7 +677,7 @@ impl App { fn change_disk_position(&mut self, num_to_change_by: i64) { if self.currently_selected_disk_position + num_to_change_by >= 0 && self.currently_selected_disk_position + num_to_change_by - < self.data.disks.len() as i64 + < self.canvas_data.disk_data.len() as i64 { self.currently_selected_disk_position += num_to_change_by; } diff --git a/src/canvas.rs b/src/canvas.rs index 093e6815..c4138f29 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -912,10 +912,10 @@ fn draw_search_field( } else { Text::styled("\nName", Style::default().fg(TABLE_HEADER_COLOUR)) }, - if app_state.use_simple { - Text::styled(" (Simple): ", Style::default().fg(TABLE_HEADER_COLOUR)) + if app_state.ignore_case { + Text::styled(" (Ignore Case): ", Style::default().fg(TABLE_HEADER_COLOUR)) } else { - Text::styled(" (Regex): ", Style::default().fg(TABLE_HEADER_COLOUR)) + Text::styled(": ", Style::default().fg(TABLE_HEADER_COLOUR)) }, ]; @@ -957,13 +957,20 @@ fn draw_processes_table( // do so by hiding some elements! 
let num_rows = i64::from(draw_loc.height) - 5; - let start_position = get_start_position( + let position = get_start_position( num_rows, &(app_state.scroll_direction), &mut app_state.previous_process_position, app_state.currently_selected_process_position, ); + // Sanity check + let start_position = if position >= process_data.len() as i64 { + std::cmp::max(0, process_data.len() as i64 - 1) + } else { + position + }; + let sliced_vec: Vec = (&process_data[start_position as usize..]).to_vec(); let mut process_counter = 0; diff --git a/src/main.rs b/src/main.rs index 9d739ea0..ae3df136 100644 --- a/src/main.rs +++ b/src/main.rs @@ -131,7 +131,7 @@ fn main() -> error::Result<()> { // Set default search method if matches.is_present("CASE_INSENSITIVE_DEFAULT") { - app.use_simple = true; + app.ignore_case = true; } // Set up up tui and crossterm @@ -256,7 +256,7 @@ fn main() -> error::Result<()> { } } // TODO: [SEARCH] Rename "simple" search to just... search without cases... - KeyCode::Char('s') => app.toggle_simple_search(), + KeyCode::Char('s') => app.toggle_ignore_case(), KeyCode::Char('a') => app.skip_cursor_beginning(), KeyCode::Char('e') => app.skip_cursor_end(), _ => {} From 332708744387d7658f3a22802f4aafde4d87f9f8 Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 2 Feb 2020 00:52:41 -0500 Subject: [PATCH 25/26] Fix dd with new changes --- src/app.rs | 43 ++++++++++++++++++++++++++++++++++--------- src/canvas.rs | 33 +++++++++++++++++---------------- 2 files changed, 51 insertions(+), 25 deletions(-) diff --git a/src/app.rs b/src/app.rs index ea33077c..fd5a8934 100644 --- a/src/app.rs +++ b/src/app.rs @@ -5,7 +5,7 @@ use std::time::Instant; pub mod data_farmer; use data_farmer::*; -use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::error::Result}; +use crate::{canvas, constants, utils::error::Result}; mod process_killer; @@ -76,7 +76,7 @@ pub struct App { pub show_help: bool, pub show_dd: bool, pub dd_err: Option, - 
to_delete_process_list: Option>, + to_delete_process_list: Option<(String, Vec)>, pub is_frozen: bool, pub left_legend: bool, pub use_current_cpu_total: bool, @@ -390,12 +390,37 @@ impl App { if self.awaiting_second_char && self.second_char == 'd' { self.awaiting_second_char = false; self.second_char = ' '; - let current_process = Vec::new(); - // TODO: Fix + if self.currently_selected_process_position + < self.canvas_data.finalized_process_data.len() as i64 + { + let current_process = if self.is_grouped() { + let group_pids = &self.canvas_data.finalized_process_data + [self.currently_selected_process_position as usize] + .group_pids; + + let mut ret = ("".to_string(), group_pids.clone()); + + for pid in group_pids { + if let Some(process) = + self.canvas_data.process_data.get(&pid) + { + ret.0 = process.name.clone(); + break; + } + } + ret + } else { + let process = self.canvas_data.finalized_process_data + [self.currently_selected_process_position as usize] + .clone(); + (process.name.clone(), vec![process.pid]) + }; + + self.to_delete_process_list = Some(current_process); + self.show_dd = true; + } - self.to_delete_process_list = Some(current_process); - self.show_dd = true; self.reset_multi_tap_keys(); } else { self.awaiting_second_char = true; @@ -490,8 +515,8 @@ impl App { // Technically unnecessary but this is a good check... 
if let WidgetPosition::Process = self.current_widget_selected { if let Some(current_selected_processes) = &(self.to_delete_process_list) { - for current_selected_process in current_selected_processes { - process_killer::kill_process_given_pid(current_selected_process.pid)?; + for pid in &current_selected_processes.1 { + process_killer::kill_process_given_pid(*pid)?; } } self.to_delete_process_list = None; @@ -499,7 +524,7 @@ impl App { Ok(()) } - pub fn get_current_highlighted_process_list(&self) -> Option<Vec<ConvertedProcessData>> { + pub fn get_to_delete_processes(&self) -> Option<(String, Vec<u32>)> { self.to_delete_process_list.clone() } diff --git a/src/canvas.rs b/src/canvas.rs index c4138f29..0cc5904e 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -239,23 +239,23 @@ pub fn draw_data( .alignment(Alignment::Center) .wrap(true) .render(&mut f, middle_dialog_chunk[1]); - } else if let Some(process_list) = app_state.get_current_highlighted_process_list() { - if let Some(process) = process_list.first() { + } else if let Some(to_kill_processes) = app_state.get_to_delete_processes() { + if let Some(first_pid) = to_kill_processes.1.first() { let dd_text = [ - if app_state.is_grouped() { - Text::raw(format!( - "\nAre you sure you want to kill {} process(es) with name {}?", - process_list.len(), process.name - )) - } else { - Text::raw(format!( - "\nAre you sure you want to kill process {} with PID {}?", - process.name, process.pid - )) - }, - Text::raw("\n\nPress ENTER to proceed, ESC to exit."), - Text::raw("\nNote that if bottom is frozen, it must be unfrozen for changes to be shown."), - ]; + if app_state.is_grouped() { + Text::raw(format!( + "\nAre you sure you want to kill {} process(es) with name {}?", + to_kill_processes.1.len(), to_kill_processes.0 + )) + } else { + Text::raw(format!( + "\nAre you sure you want to kill process {} with PID {}?", + to_kill_processes.0, first_pid + )) + }, + Text::raw("\n\nPress ENTER to proceed, ESC to exit."), + Text::raw("\nNote that if bottom is frozen, it
must be unfrozen for changes to be shown."), + ]; Paragraph::new(dd_text.iter()) .block( @@ -268,6 +268,7 @@ pub fn draw_data( .wrap(true) .render(&mut f, middle_dialog_chunk[1]); } else { + // This is a bit nasty, but it works well... I guess. app_state.show_dd = false; } } else { From 853ce3c736e94b12b98ffa867fd0d042add8bd7c Mon Sep 17 00:00:00 2001 From: ClementTsang Date: Sun, 2 Feb 2020 00:54:40 -0500 Subject: [PATCH 26/26] Move ignore case to tab --- README.md | 2 +- src/app.rs | 1 + src/canvas.rs | 2 +- src/main.rs | 1 - 4 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2b877a6f..78acf053 100644 --- a/README.md +++ b/README.md @@ -128,7 +128,7 @@ when searching processes. - `Ctrl-p` or `Ctrl-n` to switch between searching for PID and name respectively. -- `Ctrl-s` to toggle between a simple search and a regex search. +- `Tab` to toggle whether to ignore case. - `Ctrl-a` and `Ctrl-e` to jump to the start and end of the search bar respectively. diff --git a/src/app.rs b/src/app.rs index fd5a8934..ea140981 100644 --- a/src/app.rs +++ b/src/app.rs @@ -189,6 +189,7 @@ impl App { match self.current_widget_selected { WidgetPosition::Process => self.toggle_grouping(), WidgetPosition::Disk => {} + WidgetPosition::ProcessSearch => self.toggle_ignore_case(), _ => {} } } diff --git a/src/canvas.rs b/src/canvas.rs index 0cc5904e..4768426f 100644 --- a/src/canvas.rs +++ b/src/canvas.rs @@ -53,7 +53,7 @@ lazy_static! { Text::raw("Ctrl-f to toggle searching for a process. 
/ to just open it.\n"), Text::raw("Use Ctrl-p and Ctrl-n to toggle between searching for PID and name.\n"), Text::raw("Use Ctrl-a and Ctrl-e to set the cursor to the start and end of the bar respectively.\n"), - Text::raw("Use Ctrl-s to toggle between simple and regex search.\n"), + Text::raw("Use Tab to toggle whether to ignore case.\n"), Text::raw("\nFor startup flags, type in \"btm -h\".") ]; static ref COLOUR_LIST: Vec = gen_n_colours(constants::NUM_COLOURS); diff --git a/src/main.rs b/src/main.rs index ae3df136..f2ed4d42 100644 --- a/src/main.rs +++ b/src/main.rs @@ -256,7 +256,6 @@ fn main() -> error::Result<()> { } } // TODO: [SEARCH] Rename "simple" search to just... search without cases... - KeyCode::Char('s') => app.toggle_ignore_case(), KeyCode::Char('a') => app.skip_cursor_beginning(), KeyCode::Char('e') => app.skip_cursor_end(), _ => {}