From 7bd49be49a47924ed6a3e91bcf68bba9c8030967 Mon Sep 17 00:00:00 2001
From: ClementTsang
Date: Sat, 5 Oct 2019 21:55:35 -0400
Subject: [PATCH] Removed unsafe unwraps

---
 TODO.md                              |  18 ++---
 src/app/data_collection/processes.rs |  20 +++--
 src/canvas.rs                        |  34 +++++----
 src/convert_data.rs                  | 108 ++++++++++++++-------
 4 files changed, 99 insertions(+), 81 deletions(-)

diff --git a/TODO.md b/TODO.md
index 223be854..6bc722bc 100644
--- a/TODO.md
+++ b/TODO.md
@@ -2,24 +2,16 @@
 
 Note this will probably migrate to GitHub's native Issues; this was mostly for personal use during early stages.
 
-## Want to do really badly
-
-- Rebalance cpu usage in process by using current value (it's currently just summing to 100%)
+## Want to do
 
 - Travis to automate building
 
-- Refactoring!
-
 - Scaling in and out (zoom), may need to show zoom levels
 
 - More keybinds (jumping, scaling, help)
 
-- Tests
-
 - ~~Add custom error because it's really messy~~ Done, but need to implement across rest of app!
 
-- Remove any `unwrap()`, ensure no crashing! Might have to use this:
-
 - Efficiency... for example, reduce some redraw logic if possible (ie: no changes to dir sorting)
 
 - Filtering in processes (that is, allow searching)
@@ -30,6 +22,12 @@ Note this will probably migrate to GitHub's native Issues; this was mostly for p
 
 ## Less important
 
+- Rebalance cpu usage in process by using current value (it's currently just summing to 100%)
+
+- Tests
+
+- Refactoring!
+
 - Mouse + key events conflict? Make it so that some events don't clog up the loop if they are not valid keys!
 
 - Modularity
@@ -39,3 +37,5 @@ Note this will probably migrate to GitHub's native Issues; this was mostly for p
 - Truncate columns if needed for tables
 
 - Grouping by process
+
+- Deal with async and stuff (remove if not needed)
diff --git a/src/app/data_collection/processes.rs b/src/app/data_collection/processes.rs
index 02eb3bb7..6de81df2 100644
--- a/src/app/data_collection/processes.rs
+++ b/src/app/data_collection/processes.rs
@@ -67,7 +67,12 @@ fn vangelis_cpu_usage_calculation(prev_idle : &mut f64, prev_non_idle : &mut f64
 	*prev_idle = idle;
 	*prev_non_idle = non_idle;
 
-	let result = if total_delta - idle_delta != 0_f64 { total_delta - idle_delta } else { 1_f64 };
+	let result = if total_delta - idle_delta != 0_f64 {
+		total_delta - idle_delta
+	}
+	else {
+		1_f64
+	};
 
 	Ok(result) // This works, REALLY damn well. The percentage check is within like 2% of the sysinfo one.
 }
@@ -169,13 +174,14 @@ pub async fn get_sorted_processes_list(
 	let ps_result = Command::new("ps").args(&["-axo", "pid:10,comm:50,%mem:5", "--noheader"]).output()?;
 	let ps_stdout = String::from_utf8_lossy(&ps_result.stdout);
 	let split_string = ps_stdout.split('\n');
-	let cpu_usage = vangelis_cpu_usage_calculation(prev_idle, prev_non_idle).unwrap(); // TODO: FIX THIS ERROR CHECKING
-	let process_stream = split_string.collect::<Vec<&str>>();
+	if let Ok(cpu_usage) = vangelis_cpu_usage_calculation(prev_idle, prev_non_idle) {
+		let process_stream = split_string.collect::<Vec<&str>>();
 
-	for process in process_stream {
-		if let Ok(process_object) = convert_ps(process, cpu_usage, prev_pid_stats) {
-			if !process_object.command.is_empty() {
-				process_vector.push(process_object);
+		for process in process_stream {
+			if let Ok(process_object) = convert_ps(process, cpu_usage, prev_pid_stats) {
+				if !process_object.command.is_empty() {
+					process_vector.push(process_object);
+				}
 			}
 		}
 	}
diff --git a/src/canvas.rs b/src/canvas.rs
index c15782ae..f1e29463 100644
--- a/src/canvas.rs
+++ b/src/canvas.rs
@@ -147,21 +147,25 @@ pub fn draw_data(terminal : &mut Terminal, app_state :
 			.style(Style::default().fg(Color::LightBlue))
 			.data(&canvas_data.mem_data)];
 
-		if !(&canvas_data.swap_data).is_empty() && (&canvas_data.swap_data).last().unwrap().1 >= 0.0 {
-			swap_name = "SWP:".to_string()
-				+ &format!("{:3}%", (canvas_data.swap_data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64))
-				+ &format!(
-					" {:.1}GB/{:.1}GB",
-					canvas_data.mem_values[1].0 as f64 / 1024.0,
-					canvas_data.mem_values[1].1 as f64 / 1024.0
-				);
-			mem_canvas_vec.push(
-				Dataset::default()
-					.name(&swap_name)
-					.marker(if app_state.use_dot { Marker::Dot } else { Marker::Braille })
-					.style(Style::default().fg(Color::LightYellow))
-					.data(&canvas_data.swap_data),
-			);
+		if !(&canvas_data.swap_data).is_empty() {
+			if let Some(last_canvas_result) = (&canvas_data.swap_data).last() {
+				if last_canvas_result.1 >= 0.0 {
+					swap_name = "SWP:".to_string()
+						+ &format!("{:3}%", (canvas_data.swap_data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64))
+						+ &format!(
+							" {:.1}GB/{:.1}GB",
+							canvas_data.mem_values[1].0 as f64 / 1024.0,
+							canvas_data.mem_values[1].1 as f64 / 1024.0
+						);
+					mem_canvas_vec.push(
+						Dataset::default()
+							.name(&swap_name)
+							.marker(if app_state.use_dot { Marker::Dot } else { Marker::Braille })
+							.style(Style::default().fg(Color::LightYellow))
+							.data(&canvas_data.swap_data),
+					);
+				}
+			}
 		}
 
 		Chart::default()
diff --git a/src/convert_data.rs b/src/convert_data.rs
index 19486828..65b2524d 100644
--- a/src/convert_data.rs
+++ b/src/convert_data.rs
@@ -28,39 +28,47 @@ pub fn update_disk_row(app_data : &data_collection::Data) -> Vec<Vec<String>> {
 	let mut disk_vector : Vec<Vec<String>> = Vec::new();
 	for disk in &app_data.list_of_disks {
 		let io_activity = if app_data.list_of_io.len() > 2 {
-			let io_package = &app_data.list_of_io.last().unwrap();
-			let prev_io_package = &app_data.list_of_io[app_data.list_of_io.len() - 2];
+			if let Some(io_package) = &app_data.list_of_io.last() {
+				if let Some(trimmed_mount) = disk.name.to_string().split('/').last() {
+					let prev_io_package = &app_data.list_of_io[app_data.list_of_io.len() - 2];
 
-			let io_hashmap = &io_package.io_hash;
-			let prev_io_hashmap = &prev_io_package.io_hash;
-			let trimmed_mount = &disk.name.to_string().split('/').last().unwrap().to_string();
-			let time_difference = io_package.instant.duration_since(prev_io_package.instant).as_secs_f64();
-			if io_hashmap.contains_key(trimmed_mount) && prev_io_hashmap.contains_key(trimmed_mount) {
-				// Ideally change this...
-				let ele = &io_hashmap[trimmed_mount];
-				let prev = &prev_io_hashmap[trimmed_mount];
-				let read_bytes_per_sec = ((ele.read_bytes - prev.read_bytes) as f64 / time_difference) as u64;
-				let write_bytes_per_sec = ((ele.write_bytes - prev.write_bytes) as f64 / time_difference) as u64;
-				(
-					if read_bytes_per_sec < 1024 {
-						format!("{}B", read_bytes_per_sec)
-					}
-					else if read_bytes_per_sec < 1024 * 1024 {
-						format!("{}KB", read_bytes_per_sec / 1024)
-					}
-					else {
-						format!("{}MB", read_bytes_per_sec / 1024 / 1024)
-					},
-					if write_bytes_per_sec < 1024 {
-						format!("{}B", write_bytes_per_sec)
-					}
-					else if write_bytes_per_sec < 1024 * 1024 {
-						format!("{}KB", write_bytes_per_sec / 1024)
-					}
-					else {
-						format!("{}MB", write_bytes_per_sec / 1024 / 1024)
-					},
-				)
+					let io_hashmap = &io_package.io_hash;
+					let prev_io_hashmap = &prev_io_package.io_hash;
+					let time_difference = io_package.instant.duration_since(prev_io_package.instant).as_secs_f64();
+					if io_hashmap.contains_key(trimmed_mount) && prev_io_hashmap.contains_key(trimmed_mount) {
+						// Ideally change this...
+						let ele = &io_hashmap[trimmed_mount];
+						let prev = &prev_io_hashmap[trimmed_mount];
+						let read_bytes_per_sec = ((ele.read_bytes - prev.read_bytes) as f64 / time_difference) as u64;
+						let write_bytes_per_sec = ((ele.write_bytes - prev.write_bytes) as f64 / time_difference) as u64;
+						(
+							if read_bytes_per_sec < 1024 {
+								format!("{}B", read_bytes_per_sec)
+							}
+							else if read_bytes_per_sec < 1024 * 1024 {
+								format!("{}KB", read_bytes_per_sec / 1024)
+							}
+							else {
+								format!("{}MB", read_bytes_per_sec / 1024 / 1024)
+							},
+							if write_bytes_per_sec < 1024 {
+								format!("{}B", write_bytes_per_sec)
+							}
+							else if write_bytes_per_sec < 1024 * 1024 {
+								format!("{}KB", write_bytes_per_sec / 1024)
+							}
+							else {
+								format!("{}MB", write_bytes_per_sec / 1024 / 1024)
+							},
+						)
+					}
+					else {
+						("0B".to_string(), "0B".to_string())
+					}
+				}
+				else {
+					("0B".to_string(), "0B".to_string())
+				}
 			}
 			else {
 				("0B".to_string(), "0B".to_string())
@@ -161,15 +169,17 @@ pub fn update_cpu_data_points(show_avg_cpu : bool, app_data : &data_collection::
 
 		// Finally, add it all onto the end
 		for (i, data) in cpu_collection.iter().enumerate() {
-			cpu_data_vector.push((
-				// + 1 to skip total CPU if show_avg_cpu is false
-				format!(
-					"{:4}: ",
-					&*(app_data.list_of_cpu_packages.last().unwrap().cpu_vec[i + if show_avg_cpu { 0 } else { 1 }].cpu_name)
-				)
-				.to_uppercase() + &format!("{:3}%", (data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64)),
-				data.clone(),
-			))
+			if !app_data.list_of_cpu_packages.is_empty() {
+				cpu_data_vector.push((
+					// + 1 to skip total CPU if show_avg_cpu is false
+					format!(
+						"{:4}: ",
+						&*(app_data.list_of_cpu_packages.last().unwrap().cpu_vec[i + if show_avg_cpu { 0 } else { 1 }].cpu_name)
+					)
+					.to_uppercase() + &format!("{:3}%", (data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64)),
+					data.clone(),
+				))
+			}
 		}
 	}
 
@@ -287,16 +297,10 @@ pub fn convert_network_data_points(network_data : &[data_collection::network::Ne
 
 		rx.push(rx_data);
 		tx.push(tx_data);
-
-		//debug!("Pushed rx: ({}, {})", rx.last().unwrap().0, rx.last().unwrap().1);
-		//debug!("Pushed tx: ({}, {})", tx.last().unwrap().0, tx.last().unwrap().1);
 	}
 
-	let rx_display = if network_data.is_empty() {
-		"0B".to_string()
-	}
-	else {
-		let num_bytes = network_data.last().unwrap().rx;
+	let rx_display = if let Some(last_num_bytes_entry) = network_data.last() {
+		let num_bytes = last_num_bytes_entry.rx;
 		if num_bytes < 1024 {
 			format!("RX: {:4} B", num_bytes).to_string()
 		}
@@ -309,12 +313,13 @@ pub fn convert_network_data_points(network_data : &[data_collection::network::Ne
 		else {
 			format!("RX: {:4}GB", num_bytes / 1024 / 1024 / 1024).to_string()
 		}
-	};
-	let tx_display = if network_data.is_empty() {
-		"0B".to_string()
 	}
 	else {
-		let num_bytes = network_data.last().unwrap().tx;
+		"0B".to_string()
+	};
+
+	let tx_display = if let Some(last_num_bytes_entry) = network_data.last() {
+		let num_bytes = last_num_bytes_entry.tx;
 		if num_bytes < 1024 {
 			format!("TX: {:4} B", num_bytes).to_string()
 		}
@@ -327,6 +332,9 @@ pub fn convert_network_data_points(network_data : &[data_collection::network::Ne
 		else {
 			format!("TX: {:4}GB", num_bytes / 1024 / 1024 / 1024).to_string()
 		}
+	}
+	else {
+		"0B".to_string()
 	};
 
 	ConvertedNetworkData {
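
Note: every hunk in this patch applies the same pattern: a value that used to be pulled out with `.unwrap()` is now bound with `if let`, and the code falls back to doing nothing (or to a placeholder such as "0B") when the value is missing, instead of panicking. A minimal, self-contained sketch of that pattern follows; `Sample` and `read_sample` are hypothetical stand-ins for illustration, not names from this codebase.

// Illustrative sketch only; `Sample` and `read_sample` are hypothetical,
// not part of the patched codebase.
struct Sample {
	rx : u64,
}

fn read_sample() -> Option<Sample> {
	// Pretend no data has been collected yet (e.g. on the first tick).
	None
}

fn main() {
	// Before: `read_sample().unwrap().rx` would panic when no sample exists.
	// After: handle the empty case explicitly, mirroring the rx_display change above.
	let rx_display = if let Some(sample) = read_sample() {
		format!("RX: {:4} B", sample.rx)
	}
	else {
		"0B".to_string()
	};

	println!("{}", rx_display);
}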