Reworked network again; will use this to change all widgets

This commit is contained in:
ClementTsang 2020-01-25 16:36:14 -05:00
parent fe99b99d0a
commit 13f6dfc529
13 changed files with 300 additions and 240 deletions

View File

@ -1,7 +1,10 @@
pub mod data_collection; pub mod data_harvester;
use data_collection::{processes, temperature}; use data_harvester::{processes, temperature};
use std::time::Instant; use std::time::Instant;
pub mod data_janitor;
use data_janitor::*;
use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::error::Result}; use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::error::Result};
mod process_killer; mod process_killer;
@ -30,6 +33,23 @@ lazy_static! {
regex::Regex::new(".*"); regex::Regex::new(".*");
} }
/// AppConfigFields is meant to cover basic fields that would normally be set
/// by config files or launch options. Don't need to be mutable (set and forget).
pub struct AppConfigFields {
// How often the data collector refreshes, in milliseconds.
pub update_rate_in_milliseconds: u64,
// Unit temperatures are reported in (Celsius/Kelvin/Fahrenheit).
pub temperature_type: temperature::TemperatureType,
// Whether graphs use a dot marker (set from the DOT_MARKER flag).
pub use_dot: bool,
}
/// AppScrollWidgetState deals with fields for a scrollable app's current state.
pub struct AppScrollWidgetState {
// Current scroll offset within the widget's list.
pub widget_scroll_position: i64,
}
/// AppSearchState only deals with the search's state.
/// Currently an empty placeholder; the search fields (query, regex, cursor)
/// still live directly on `App`.
pub struct AppSearchState {}
// TODO: [OPT] Group like fields together... this is kinda gross to step through
pub struct App { pub struct App {
// Sorting // Sorting
pub process_sorting_type: processes::ProcessSorting, pub process_sorting_type: processes::ProcessSorting,
@ -49,7 +69,7 @@ pub struct App {
pub update_rate_in_milliseconds: u64, pub update_rate_in_milliseconds: u64,
pub show_average_cpu: bool, pub show_average_cpu: bool,
pub current_application_position: ApplicationPosition, pub current_application_position: ApplicationPosition,
pub data: data_collection::Data, pub data: data_harvester::Data,
awaiting_second_char: bool, awaiting_second_char: bool,
second_char: char, second_char: char,
pub use_dot: bool, pub use_dot: bool,
@ -63,12 +83,13 @@ pub struct App {
last_key_press: Instant, last_key_press: Instant,
pub canvas_data: canvas::CanvasData, pub canvas_data: canvas::CanvasData,
enable_grouping: bool, enable_grouping: bool,
enable_searching: bool, // TODO: [OPT] group together? enable_searching: bool,
current_search_query: String, current_search_query: String,
searching_pid: bool, searching_pid: bool,
pub use_simple: bool, pub use_simple: bool,
current_regex: std::result::Result<regex::Regex, regex::Error>, current_regex: std::result::Result<regex::Regex, regex::Error>,
current_cursor_position: usize, current_cursor_position: usize,
pub data_collection: DataCollection,
} }
impl App { impl App {
@ -94,7 +115,7 @@ impl App {
previous_disk_position: 0, previous_disk_position: 0,
previous_temp_position: 0, previous_temp_position: 0,
previous_cpu_table_position: 0, previous_cpu_table_position: 0,
data: data_collection::Data::default(), data: data_harvester::Data::default(),
awaiting_second_char: false, awaiting_second_char: false,
second_char: ' ', second_char: ' ',
use_dot, use_dot,
@ -114,6 +135,7 @@ impl App {
use_simple: false, use_simple: false,
current_regex: BASE_REGEX.clone(), //TODO: [OPT] seems like a thing we can switch to lifetimes to avoid cloning current_regex: BASE_REGEX.clone(), //TODO: [OPT] seems like a thing we can switch to lifetimes to avoid cloning
current_cursor_position: 0, current_cursor_position: 0,
data_collection: DataCollection::default(),
} }
} }

View File

@ -1,86 +0,0 @@
use futures::StreamExt;
use heim::net;
use heim::units::information::byte;
use std::time::Instant;
use sysinfo::{NetworkExt, System, SystemExt};
#[derive(Clone, Debug)]
/// A synthetic point interpolated between two real network samples, used to
/// smooth the graph between collections.
pub struct NetworkJoinPoint {
pub rx: f64,
pub tx: f64,
// Offset measured back from the newer sample, in milliseconds.
pub time_offset_milliseconds: f64,
}
// One recorded sample: collection instant, the measured reading, and optional
// interpolated points joining it to the previous sample.
type NetworkDataGroup = (Instant, (NetworkData, Option<Vec<NetworkJoinPoint>>));
#[derive(Clone, Debug)]
/// Rolling store of network samples plus running byte totals.
pub struct NetworkStorage {
pub data_points: Vec<NetworkDataGroup>,
// Most recent rx/tx reading.
pub rx: u64,
pub tx: u64,
// Cumulative byte counters carried between collections.
pub total_rx: u64,
pub total_tx: u64,
pub last_collection_time: Instant,
}
impl Default for NetworkStorage {
    /// Fresh storage: zeroed counters, empty history, clock anchored at creation.
    fn default() -> Self {
        NetworkStorage {
            last_collection_time: Instant::now(),
            total_rx: 0,
            total_tx: 0,
            rx: 0,
            tx: 0,
            data_points: Vec::new(),
        }
    }
}
impl NetworkStorage {
    /// Reset the sample history and the instantaneous readings; called after
    /// the very first collection pass.
    pub fn first_run(&mut self) {
        self.data_points.clear();
        self.rx = 0;
        self.tx = 0;
    }
}
#[derive(Clone, Debug)]
/// Note all values are in bytes...
/// `rx`/`tx` here are the per-interval readings produced by
/// `get_network_data` (bytes per second on non-Windows targets).
pub struct NetworkData {
pub rx: u64,
pub tx: u64,
}
/// Harvest one network reading. On non-Windows targets `rx`/`tx` are rates
/// (bytes per second averaged since `prev_net_access_time`); the cumulative
/// counters are written back through `prev_net_rx`/`prev_net_tx`.
pub async fn get_network_data(
sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64,
curr_time: &Instant,
) -> NetworkData {
// FIXME: [WIN] Track current total bytes... also is this accurate?
if cfg!(target_os = "windows") {
// sysinfo exposes income/outcome readings directly; totals untracked here.
let network_data = sys.get_network();
NetworkData {
rx: network_data.get_income(),
tx: network_data.get_outcome(),
}
} else {
// Sum heim's cumulative per-interface byte counters; per-interface errors
// are silently skipped.
let mut io_data = net::io_counters();
let mut net_rx: u64 = 0;
let mut net_tx: u64 = 0;
while let Some(io) = io_data.next().await {
if let Ok(io) = io {
net_rx += io.bytes_recv().get::<byte>();
net_tx += io.bytes_sent().get::<byte>();
}
}
let elapsed_time = curr_time
.duration_since(*prev_net_access_time)
.as_secs_f64();
// NOTE(review): if a counter ever resets below the stored previous value,
// this u64 subtraction underflows (panic in debug builds); elapsed_time == 0
// would also divide by zero. Confirm both are impossible at the call sites.
let rx = ((net_rx - *prev_net_rx) as f64 / elapsed_time) as u64;
let tx = ((net_tx - *prev_net_tx) as f64 / elapsed_time) as u64;
*prev_net_rx = net_rx;
*prev_net_tx = net_tx;
NetworkData { rx, tx }
}
}

View File

@ -23,39 +23,52 @@ fn push_if_valid<T: std::clone::Clone>(result: &Result<T>, vector_to_push: &mut
} }
} }
#[derive(Clone, Debug, Default)] #[derive(Clone, Debug)]
pub struct Data { pub struct Data {
pub list_of_cpu_packages: Vec<cpu::CPUPackage>, pub list_of_cpu_packages: Vec<cpu::CPUPackage>,
pub list_of_io: Vec<disks::IOPackage>, pub list_of_io: Vec<disks::IOPackage>,
pub list_of_physical_io: Vec<disks::IOPackage>,
pub memory: Vec<mem::MemData>, pub memory: Vec<mem::MemData>,
pub swap: Vec<mem::MemData>, pub swap: Vec<mem::MemData>,
pub list_of_temperature_sensor: Vec<temperature::TempData>, pub list_of_temperature_sensor: Vec<temperature::TempData>,
pub network: network::NetworkStorage, pub network: network::NetworkHarvest,
pub list_of_processes: Vec<processes::ProcessData>, pub list_of_processes: Vec<processes::ProcessData>,
pub grouped_list_of_processes: Option<Vec<processes::ProcessData>>, pub grouped_list_of_processes: Option<Vec<processes::ProcessData>>,
pub list_of_disks: Vec<disks::DiskData>, pub list_of_disks: Vec<disks::DiskData>,
pub last_collection_time: Instant,
}
impl Default for Data {
fn default() -> Self {
Data {
list_of_cpu_packages: Vec::default(),
list_of_io: Vec::default(),
memory: Vec::default(),
swap: Vec::default(),
list_of_temperature_sensor: Vec::default(),
list_of_processes: Vec::default(),
grouped_list_of_processes: None,
list_of_disks: Vec::default(),
network: network::NetworkHarvest::default(),
last_collection_time: Instant::now(),
}
}
} }
impl Data { impl Data {
pub fn first_run_cleanup(&mut self) { pub fn first_run_cleanup(&mut self) {
self.list_of_cpu_packages = Vec::new(); self.list_of_cpu_packages = Vec::new();
self.list_of_io = Vec::new(); self.list_of_io = Vec::new();
self.list_of_physical_io = Vec::new();
self.memory = Vec::new(); self.memory = Vec::new();
self.swap = Vec::new(); self.swap = Vec::new();
self.list_of_temperature_sensor = Vec::new(); self.list_of_temperature_sensor = Vec::new();
self.list_of_processes = Vec::new(); self.list_of_processes = Vec::new();
self.grouped_list_of_processes = None; self.grouped_list_of_processes = None;
self.list_of_disks = Vec::new(); self.list_of_disks = Vec::new();
self.network.first_run();
} }
} }
pub struct DataState { pub struct DataState {
pub data: Data, pub data: Data,
first_run: bool,
sys: System, sys: System,
stale_max_seconds: u64, stale_max_seconds: u64,
prev_pid_stats: HashMap<String, (f64, Instant)>, prev_pid_stats: HashMap<String, (f64, Instant)>,
@ -70,7 +83,6 @@ impl Default for DataState {
fn default() -> Self { fn default() -> Self {
DataState { DataState {
data: Data::default(), data: Data::default(),
first_run: true,
sys: System::new(), sys: System::new(),
stale_max_seconds: constants::STALE_MAX_MILLISECONDS / 1000, stale_max_seconds: constants::STALE_MAX_MILLISECONDS / 1000,
prev_pid_stats: HashMap::new(), prev_pid_stats: HashMap::new(),
@ -108,57 +120,15 @@ impl DataState {
let current_instant = std::time::Instant::now(); let current_instant = std::time::Instant::now();
// Network // Network
let new_network_data = network::get_network_data( self.data.network = network::get_network_data(
&self.sys, &self.sys,
&self.data.network.last_collection_time, &self.data.last_collection_time,
&mut self.data.network.total_rx, &mut self.data.network.total_rx,
&mut self.data.network.total_tx, &mut self.data.network.total_tx,
&current_instant, &current_instant,
) )
.await; .await;
let joining_points: Option<Vec<network::NetworkJoinPoint>> =
if !self.data.network.data_points.is_empty() {
if let Some(last_entry) = self.data.network.data_points.last() {
// If not empty, inject joining points
let prev_data = &last_entry.1;
let rx_diff = new_network_data.rx as f64 - prev_data.0.rx as f64;
let tx_diff = new_network_data.tx as f64 - prev_data.0.tx as f64;
let time_gap = current_instant
.duration_since(self.data.network.last_collection_time)
.as_millis() as f64;
let mut new_joining_points = Vec::new();
let num_points = 50;
for idx in (0..num_points).rev() {
new_joining_points.push(network::NetworkJoinPoint {
rx: prev_data.0.rx as f64
+ rx_diff / num_points as f64 * (num_points - idx) as f64,
tx: prev_data.0.tx as f64
+ tx_diff / num_points as f64 * (num_points - idx) as f64,
time_offset_milliseconds: time_gap / num_points as f64 * idx as f64,
});
}
Some(new_joining_points)
} else {
None
}
} else {
None
};
// Set values
self.data.network.rx = new_network_data.rx;
self.data.network.tx = new_network_data.tx;
self.data.network.last_collection_time = current_instant;
// Add new point
self.data
.network
.data_points
.push((current_instant, (new_network_data, joining_points)));
// What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update! // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update!
push_if_valid( push_if_valid(
&cpu::get_cpu_data_list(&self.sys, &current_instant), &cpu::get_cpu_data_list(&self.sys, &current_instant),
@ -198,10 +168,7 @@ impl DataState {
&mut self.data.list_of_temperature_sensor, &mut self.data.list_of_temperature_sensor,
); );
if self.first_run { self.data.last_collection_time = current_instant;
self.data.first_run_cleanup();
self.first_run = false;
}
// Filter out stale timed entries // Filter out stale timed entries
let clean_instant = Instant::now(); let clean_instant = Instant::now();

View File

@ -0,0 +1,55 @@
use futures::StreamExt;
use heim::net;
use heim::units::information::byte;
use std::time::Instant;
use sysinfo::{NetworkExt, System, SystemExt};
/// A single harvested network reading. All values are in bytes;
/// `rx`/`tx` are rates (bytes per second over the sampled interval on
/// non-Windows targets) while `total_rx`/`total_tx` carry the raw cumulative
/// counters (left 0 on Windows — see the FIXME in `get_network_data`).
#[derive(Default, Clone, Debug)]
pub struct NetworkHarvest {
pub rx: u64,
pub tx: u64,
pub total_rx: u64,
pub total_tx: u64,
}
/// Harvest one network reading.
///
/// On non-Windows targets, `rx`/`tx` are rates (bytes per second averaged
/// since `prev_net_access_time`) and `total_rx`/`total_tx` are the summed
/// cumulative interface counters; the previous totals are written back
/// through `prev_net_rx`/`prev_net_tx` for the next call.
pub async fn get_network_data(
    sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64,
    curr_time: &Instant,
) -> NetworkHarvest {
    // FIXME: [WIN] Track current total bytes... also is this accurate?
    if cfg!(target_os = "windows") {
        // sysinfo exposes income/outcome readings directly; totals untracked here.
        let network_data = sys.get_network();
        NetworkHarvest {
            rx: network_data.get_income(),
            tx: network_data.get_outcome(),
            total_rx: 0,
            total_tx: 0,
        }
    } else {
        // Sum heim's cumulative per-interface byte counters; per-interface
        // errors are silently skipped.
        let mut io_data = net::io_counters();
        let mut total_rx: u64 = 0;
        let mut total_tx: u64 = 0;
        while let Some(io) = io_data.next().await {
            if let Ok(io) = io {
                total_rx += io.bytes_recv().get::<byte>();
                total_tx += io.bytes_sent().get::<byte>();
            }
        }
        let elapsed_time = curr_time
            .duration_since(*prev_net_access_time)
            .as_secs_f64();
        // saturating_sub: an interface counter can drop below the stored
        // previous total (e.g. interface reset), which would otherwise
        // underflow and panic in debug builds. Also guard a zero-length
        // interval so we never divide by zero and cast inf/NaN to u64.
        let (rx, tx) = if elapsed_time > 0.0 {
            (
                (total_rx.saturating_sub(*prev_net_rx) as f64 / elapsed_time) as u64,
                (total_tx.saturating_sub(*prev_net_tx) as f64 / elapsed_time) as u64,
            )
        } else {
            (0, 0)
        };
        *prev_net_rx = total_rx;
        *prev_net_tx = total_tx;
        NetworkHarvest {
            rx,
            tx,
            total_rx,
            total_tx,
        }
    }
}

120
src/app/data_janitor.rs Normal file
View File

@ -0,0 +1,120 @@
//! In charge of cleaning and managing data. I couldn't think of a better
//! name for the file.

use crate::{data_harvester::network, data_harvester::Data};
use std::time::Instant;
use std::vec::Vec;
// Offsets are in milliseconds (see `generate_joining_points`); values are in
// whatever unit the channel uses (e.g. bytes for network).
pub type TimeOffset = f64;
pub type Value = f64;
// A sampled value plus the interpolated points bridging it back to the
// previous sample.
pub type JoinedDataPoints = (Value, Vec<(TimeOffset, Value)>);
/// One logical sample across every tracked channel at a single instant.
/// NOTE(review): only `rx_data`/`tx_data` are populated by `eat_data` so far;
/// the cpu/mem/swap channels are left at their defaults.
#[derive(Debug, Default)]
pub struct TimedData {
pub rx_data: JoinedDataPoints,
pub tx_data: JoinedDataPoints,
pub cpu_data: JoinedDataPoints,
pub mem_data: JoinedDataPoints,
pub swap_data: JoinedDataPoints,
}
/// DataCollection represents the pooled data stored within the main app
/// thread. Basically stores a (occasionally cleaned) record of the data
/// collected, and what is needed to convert into a displayable form.
///
/// If the app is *frozen* - that is, we do not want to *display* any changing
/// data, keep updating this, don't convert to canvas displayable data!
///
/// Note that with this method, the *app* thread is responsible for cleaning -
/// not the data collector.
#[derive(Debug)]
pub struct DataCollection {
// Instant of the most recently eaten harvest.
pub current_instant: Instant,
// Chronological record of samples; appended to by `eat_data`.
pub timed_data_vec: Vec<(Instant, TimedData)>,
// Latest raw network reading, kept for the textual rx/tx/total displays.
pub network_harvest: network::NetworkHarvest,
// TODO: planned per-category stores, not yet implemented:
// pub process_data: ProcessData,
// pub disk_data: DiskData,
// pub temp_data: TempData,
}
impl Default for DataCollection {
    /// Start with an empty record anchored at the moment of creation.
    fn default() -> Self {
        DataCollection {
            timed_data_vec: Vec::new(),
            network_harvest: network::NetworkHarvest::default(),
            current_instant: Instant::now(),
        }
    }
}
impl DataCollection {
    /// Prune old entries. TODO: currently a no-op.
    pub fn clean_data(&mut self) {}

    /// Fold one harvested snapshot into the timed record, interpolating
    /// joining points between the previous sample and this one so graphs
    /// stay smooth.
    pub fn eat_data(&mut self, harvested_data: &Data) {
        let harvested_time = harvested_data.last_collection_time;
        let rx_value = harvested_data.network.rx as f64;
        let tx_value = harvested_data.network.tx as f64;

        // Bridge from the last recorded entry; an empty record has nothing
        // to join from.
        let (rx_joiners, tx_joiners) = match self.timed_data_vec.last() {
            Some((prev_time, prev_entry)) => (
                generate_joining_points(prev_time, prev_entry.rx_data.0, &harvested_time, rx_value),
                generate_joining_points(prev_time, prev_entry.tx_data.0, &harvested_time, tx_value),
            ),
            None => (Vec::new(), Vec::new()),
        };

        let mut new_entry = TimedData::default();
        new_entry.rx_data = (rx_value, rx_joiners);
        new_entry.tx_data = (tx_value, tx_joiners);

        // Keep the raw harvest around for the textual displays.
        self.network_harvest = harvested_data.network.clone();

        // And we're done eating.
        self.current_instant = harvested_time;
        self.timed_data_vec.push((harvested_time, new_entry));
    }
}
/// Linearly interpolate between two samples, producing (time-offset-in-ms,
/// value) points. Offsets count back from `end_x`: the first point carries
/// the full time difference (i.e. sits at `start_x`) and offsets shrink
/// towards — but never reach — zero.
pub fn generate_joining_points(
    start_x: &Instant, start_y: f64, end_x: &Instant, end_y: f64,
) -> Vec<(TimeOffset, Value)> {
    // Work in milliseconds; the epsilon below guards against a zero interval.
    let time_difference = end_x.duration_since(*start_x).as_millis() as f64;
    let value_difference = end_y - start_y;

    // Scale the point count with the steepness of the change, bounded to
    // [100, 1000] to balance smoothness against memory.
    let num_points = ((value_difference.abs() / (time_difference + 0.0001) * 1000.0) as u64)
        .max(100)
        .min(1000);

    (0..num_points)
        .map(|itx| {
            let frac = itx as f64 / num_points as f64;
            (
                time_difference - frac * time_difference,
                start_y + frac * value_difference,
            )
        })
        .collect()
}

View File

@ -334,7 +334,6 @@ pub fn draw_data<B: backend::Backend>(
} else { } else {
5 5
}; };
debug!("Req: {}", required);
let remaining = bottom_chunks[0].height - required; let remaining = bottom_chunks[0].height - required;
[Constraint::Length(remaining), Constraint::Length(required)] [Constraint::Length(remaining), Constraint::Length(required)]
} }
@ -665,7 +664,7 @@ fn draw_network_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::Ap
let x_axis: Axis<String> = Axis::default() let x_axis: Axis<String> = Axis::default()
.style(Style::default().fg(GRAPH_COLOUR)) .style(Style::default().fg(GRAPH_COLOUR))
.bounds([0.0, 600_000.0]); .bounds([0.0, 60_000.0]);
let y_axis = Axis::default() let y_axis = Axis::default()
.style(Style::default().fg(GRAPH_COLOUR)) .style(Style::default().fg(GRAPH_COLOUR))
.bounds([-0.5, 30_f64]) .bounds([-0.5, 30_f64])
@ -1035,7 +1034,7 @@ fn draw_processes_table<B: backend::Backend>(
) )
}); });
use app::data_collection::processes::ProcessSorting; use app::data_harvester::processes::ProcessSorting;
let mut pid_or_name = if app_state.is_grouped() { let mut pid_or_name = if app_state.is_grouped() {
"Count" "Count"
} else { } else {

View File

@ -2,12 +2,14 @@
//! can actually handle. //! can actually handle.
use crate::{ use crate::{
app::data_collection, app::data_harvester,
app::data_janitor,
constants, constants,
utils::gen_util::{get_exact_byte_values, get_simple_byte_values}, utils::gen_util::{get_exact_byte_values, get_simple_byte_values},
}; };
use constants::*; use constants::*;
use regex::Regex; use regex::Regex;
use std::time::Instant;
#[derive(Default, Debug)] #[derive(Default, Debug)]
pub struct ConvertedNetworkData { pub struct ConvertedNetworkData {
@ -55,7 +57,7 @@ impl From<&CpuPoint> for (f64, f64) {
} }
pub fn update_temp_row( pub fn update_temp_row(
app_data: &data_collection::Data, temp_type: &data_collection::temperature::TemperatureType, app_data: &data_harvester::Data, temp_type: &data_harvester::temperature::TemperatureType,
) -> Vec<Vec<String>> { ) -> Vec<Vec<String>> {
let mut sensor_vector: Vec<Vec<String>> = Vec::new(); let mut sensor_vector: Vec<Vec<String>> = Vec::new();
@ -67,9 +69,9 @@ pub fn update_temp_row(
sensor.component_name.to_string(), sensor.component_name.to_string(),
(sensor.temperature.ceil() as u64).to_string() (sensor.temperature.ceil() as u64).to_string()
+ match temp_type { + match temp_type {
data_collection::temperature::TemperatureType::Celsius => "C", data_harvester::temperature::TemperatureType::Celsius => "C",
data_collection::temperature::TemperatureType::Kelvin => "K", data_harvester::temperature::TemperatureType::Kelvin => "K",
data_collection::temperature::TemperatureType::Fahrenheit => "F", data_harvester::temperature::TemperatureType::Fahrenheit => "F",
}, },
]); ]);
} }
@ -78,7 +80,7 @@ pub fn update_temp_row(
sensor_vector sensor_vector
} }
pub fn update_disk_row(app_data: &data_collection::Data) -> Vec<Vec<String>> { pub fn update_disk_row(app_data: &data_harvester::Data) -> Vec<Vec<String>> {
let mut disk_vector: Vec<Vec<String>> = Vec::new(); let mut disk_vector: Vec<Vec<String>> = Vec::new();
for disk in &app_data.list_of_disks { for disk in &app_data.list_of_disks {
let io_activity = { let io_activity = {
@ -141,7 +143,7 @@ pub fn update_disk_row(app_data: &data_collection::Data) -> Vec<Vec<String>> {
} }
pub fn simple_update_process_row( pub fn simple_update_process_row(
app_data: &data_collection::Data, matching_string: &str, use_pid: bool, app_data: &data_harvester::Data, matching_string: &str, use_pid: bool,
) -> (Vec<ConvertedProcessData>, Vec<ConvertedProcessData>) { ) -> (Vec<ConvertedProcessData>, Vec<ConvertedProcessData>) {
let process_vector: Vec<ConvertedProcessData> = app_data let process_vector: Vec<ConvertedProcessData> = app_data
.list_of_processes .list_of_processes
@ -183,7 +185,7 @@ pub fn simple_update_process_row(
} }
pub fn regex_update_process_row( pub fn regex_update_process_row(
app_data: &data_collection::Data, regex_matcher: &std::result::Result<Regex, regex::Error>, app_data: &data_harvester::Data, regex_matcher: &std::result::Result<Regex, regex::Error>,
use_pid: bool, use_pid: bool,
) -> (Vec<ConvertedProcessData>, Vec<ConvertedProcessData>) { ) -> (Vec<ConvertedProcessData>, Vec<ConvertedProcessData>) {
let process_vector: Vec<ConvertedProcessData> = app_data let process_vector: Vec<ConvertedProcessData> = app_data
@ -226,7 +228,7 @@ pub fn regex_update_process_row(
} }
fn return_mapped_process( fn return_mapped_process(
process: &data_collection::processes::ProcessData, app_data: &data_collection::Data, process: &data_harvester::processes::ProcessData, app_data: &data_harvester::Data,
) -> ConvertedProcessData { ) -> ConvertedProcessData {
ConvertedProcessData { ConvertedProcessData {
pid: process.pid, pid: process.pid,
@ -251,7 +253,7 @@ fn return_mapped_process(
} }
pub fn update_cpu_data_points( pub fn update_cpu_data_points(
show_avg_cpu: bool, app_data: &data_collection::Data, show_avg_cpu: bool, app_data: &data_harvester::Data,
) -> Vec<ConvertedCpuData> { ) -> Vec<ConvertedCpuData> {
let mut cpu_data_vector: Vec<ConvertedCpuData> = Vec::new(); let mut cpu_data_vector: Vec<ConvertedCpuData> = Vec::new();
let mut cpu_collection: Vec<Vec<CpuPoint>> = Vec::new(); let mut cpu_collection: Vec<Vec<CpuPoint>> = Vec::new();
@ -264,7 +266,7 @@ pub fn update_cpu_data_points(
let mut this_cpu_data: Vec<CpuPoint> = Vec::new(); let mut this_cpu_data: Vec<CpuPoint> = Vec::new();
for data in &app_data.list_of_cpu_packages { for data in &app_data.list_of_cpu_packages {
let current_time = std::time::Instant::now(); let current_time = Instant::now();
let current_cpu_usage = data.cpu_vec[cpu_num].cpu_usage; let current_cpu_usage = data.cpu_vec[cpu_num].cpu_usage;
let new_entry = CpuPoint { let new_entry = CpuPoint {
@ -329,15 +331,15 @@ pub fn update_cpu_data_points(
cpu_data_vector cpu_data_vector
} }
pub fn update_mem_data_points(app_data: &data_collection::Data) -> Vec<(f64, f64)> { pub fn update_mem_data_points(app_data: &data_harvester::Data) -> Vec<(f64, f64)> {
convert_mem_data(&app_data.memory) convert_mem_data(&app_data.memory)
} }
pub fn update_swap_data_points(app_data: &data_collection::Data) -> Vec<(f64, f64)> { pub fn update_swap_data_points(app_data: &data_harvester::Data) -> Vec<(f64, f64)> {
convert_mem_data(&app_data.swap) convert_mem_data(&app_data.swap)
} }
pub fn update_mem_data_values(app_data: &data_collection::Data) -> Vec<(u64, u64)> { pub fn update_mem_data_values(app_data: &data_harvester::Data) -> Vec<(u64, u64)> {
let mut result: Vec<(u64, u64)> = Vec::new(); let mut result: Vec<(u64, u64)> = Vec::new();
result.push(get_most_recent_mem_values(&app_data.memory)); result.push(get_most_recent_mem_values(&app_data.memory));
result.push(get_most_recent_mem_values(&app_data.swap)); result.push(get_most_recent_mem_values(&app_data.swap));
@ -345,7 +347,7 @@ pub fn update_mem_data_values(app_data: &data_collection::Data) -> Vec<(u64, u64
result result
} }
fn get_most_recent_mem_values(mem_data: &[data_collection::mem::MemData]) -> (u64, u64) { fn get_most_recent_mem_values(mem_data: &[data_harvester::mem::MemData]) -> (u64, u64) {
let mut result: (u64, u64) = (0, 0); let mut result: (u64, u64) = (0, 0);
if !mem_data.is_empty() { if !mem_data.is_empty() {
@ -358,7 +360,7 @@ fn get_most_recent_mem_values(mem_data: &[data_collection::mem::MemData]) -> (u6
result result
} }
fn convert_mem_data(mem_data: &[data_collection::mem::MemData]) -> Vec<(f64, f64)> { fn convert_mem_data(mem_data: &[data_harvester::mem::MemData]) -> Vec<(f64, f64)> {
let mut result: Vec<(f64, f64)> = Vec::new(); let mut result: Vec<(f64, f64)> = Vec::new();
for data in mem_data { for data in mem_data {
@ -394,67 +396,45 @@ fn convert_mem_data(mem_data: &[data_collection::mem::MemData]) -> Vec<(f64, f64
result result
} }
pub fn update_network_data_points(app_data: &data_collection::Data) -> ConvertedNetworkData {
convert_network_data_points(&app_data.network)
}
pub fn convert_network_data_points( pub fn convert_network_data_points(
network_data: &data_collection::network::NetworkStorage, current_data: &data_janitor::DataCollection,
) -> ConvertedNetworkData { ) -> ConvertedNetworkData {
let mut rx: Vec<(f64, f64)> = Vec::new(); let mut rx: Vec<(f64, f64)> = Vec::new();
let mut tx: Vec<(f64, f64)> = Vec::new(); let mut tx: Vec<(f64, f64)> = Vec::new();
let current_time = network_data.last_collection_time; let current_time = current_data.current_instant;
for (time, data) in &network_data.data_points { for (time, data) in &current_data.timed_data_vec {
let time_from_start: f64 = ((TIME_STARTS_FROM as f64 let time_from_start: f64 = (TIME_STARTS_FROM as f64
- current_time.duration_since(*time).as_millis() as f64) - current_time.duration_since(*time).as_millis() as f64)
* 10_f64)
.floor(); .floor();
// Insert in joiner points //Insert joiner points
if let Some(joiners) = &data.1 { for &(joiner_offset, joiner_val) in &data.rx_data.1 {
for joiner in joiners { let offset_time = time_from_start - joiner_offset as f64;
let offset_time = time_from_start - joiner.time_offset_milliseconds as f64 * 10_f64; rx.push((
rx.push(( offset_time,
offset_time, if joiner_val > 0.0 {
if joiner.rx > 0.0 { (joiner_val).log(2.0)
(joiner.rx).log(2.0) } else {
} else { 0.0
0.0 },
}, ));
));
tx.push((
offset_time,
if joiner.tx > 0.0 {
(joiner.tx).log(2.0)
} else {
0.0
},
));
}
} }
// Insert in main points for &(joiner_offset, joiner_val) in &data.tx_data.1 {
let rx_data = ( let offset_time = time_from_start - joiner_offset as f64;
time_from_start, tx.push((
if data.0.rx > 0 { offset_time,
(data.0.rx as f64).log(2.0) if joiner_val > 0.0 {
} else { (joiner_val).log(2.0)
0.0 } else {
}, 0.0
); },
let tx_data = ( ));
time_from_start, }
if data.0.tx > 0 {
(data.0.tx as f64).log(2.0)
} else {
0.0
},
);
rx.push(rx_data); rx.push((time_from_start, data.rx_data.0));
tx.push(tx_data); tx.push((time_from_start, data.tx_data.0));
} }
let total_rx_converted_result: (f64, String); let total_rx_converted_result: (f64, String);
@ -462,8 +442,8 @@ pub fn convert_network_data_points(
let total_tx_converted_result: (f64, String); let total_tx_converted_result: (f64, String);
let tx_converted_result: (f64, String); let tx_converted_result: (f64, String);
rx_converted_result = get_exact_byte_values(network_data.rx, false); rx_converted_result = get_exact_byte_values(current_data.network_harvest.rx, false);
total_rx_converted_result = get_exact_byte_values(network_data.total_rx, false); total_rx_converted_result = get_exact_byte_values(current_data.network_harvest.total_rx, false);
let rx_display = format!("{:.*}{}", 1, rx_converted_result.0, rx_converted_result.1); let rx_display = format!("{:.*}{}", 1, rx_converted_result.0, rx_converted_result.1);
let total_rx_display = if cfg!(not(target_os = "windows")) { let total_rx_display = if cfg!(not(target_os = "windows")) {
format!( format!(
@ -474,8 +454,8 @@ pub fn convert_network_data_points(
"N/A".to_string() "N/A".to_string()
}; };
tx_converted_result = get_exact_byte_values(network_data.tx, false); tx_converted_result = get_exact_byte_values(current_data.network_harvest.tx, false);
total_tx_converted_result = get_exact_byte_values(network_data.total_tx, false); total_tx_converted_result = get_exact_byte_values(current_data.network_harvest.total_tx, false);
let tx_display = format!("{:.*}{}", 1, tx_converted_result.0, tx_converted_result.1); let tx_display = format!("{:.*}{}", 1, tx_converted_result.0, tx_converted_result.1);
let total_tx_display = if cfg!(not(target_os = "windows")) { let total_tx_display = if cfg!(not(target_os = "windows")) {
format!( format!(

View File

@ -35,8 +35,8 @@ mod canvas;
mod constants; mod constants;
mod data_conversion; mod data_conversion;
use app::data_collection; use app::data_harvester;
use app::data_collection::processes::ProcessData; use app::data_harvester::processes::ProcessData;
use constants::TICK_RATE_IN_MILLISECONDS; use constants::TICK_RATE_IN_MILLISECONDS;
use data_conversion::*; use data_conversion::*;
use std::collections::BTreeMap; use std::collections::BTreeMap;
@ -45,7 +45,7 @@ use utils::error::{self, BottomError};
enum Event<I, J> { enum Event<I, J> {
KeyInput(I), KeyInput(I),
MouseInput(J), MouseInput(J),
Update(Box<data_collection::Data>), Update(Box<data_harvester::Data>),
} }
enum ResetEvent { enum ResetEvent {
@ -104,11 +104,11 @@ fn main() -> error::Result<()> {
// Set other settings // Set other settings
let temperature_type = if matches.is_present("FAHRENHEIT") { let temperature_type = if matches.is_present("FAHRENHEIT") {
data_collection::temperature::TemperatureType::Fahrenheit data_harvester::temperature::TemperatureType::Fahrenheit
} else if matches.is_present("KELVIN") { } else if matches.is_present("KELVIN") {
data_collection::temperature::TemperatureType::Kelvin data_harvester::temperature::TemperatureType::Kelvin
} else { } else {
data_collection::temperature::TemperatureType::Celsius data_harvester::temperature::TemperatureType::Celsius
}; };
let show_average_cpu = matches.is_present("AVG_CPU"); let show_average_cpu = matches.is_present("AVG_CPU");
let use_dot = matches.is_present("DOT_MARKER"); let use_dot = matches.is_present("DOT_MARKER");
@ -183,7 +183,7 @@ fn main() -> error::Result<()> {
let temp_type = app.temperature_type.clone(); let temp_type = app.temperature_type.clone();
thread::spawn(move || { thread::spawn(move || {
let tx = tx.clone(); let tx = tx.clone();
let mut data_state = data_collection::DataState::default(); let mut data_state = data_harvester::DataState::default();
data_state.init(); data_state.init();
data_state.set_temperature_type(temp_type); data_state.set_temperature_type(temp_type);
data_state.set_use_current_cpu_total(use_current_cpu_total); data_state.set_use_current_cpu_total(use_current_cpu_total);
@ -193,21 +193,22 @@ fn main() -> error::Result<()> {
ResetEvent::Reset => { ResetEvent::Reset => {
//debug!("Received reset message"); //debug!("Received reset message");
first_run = true; first_run = true;
data_state.data = app::data_collection::Data::default(); data_state.data = app::data_harvester::Data::default();
} }
} }
} }
futures::executor::block_on(data_state.update_data()); futures::executor::block_on(data_state.update_data());
tx.send(Event::Update(Box::from(data_state.data.clone())))
.unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it
if first_run { if first_run {
// Fix for if you set a really long time for update periods (and just gives a faster first value) // Fix for if you set a really long time for update periods (and just gives a faster first value)
data_state.data.first_run_cleanup(); // TODO: [OPT] we can remove this later.
thread::sleep(Duration::from_millis(250)); thread::sleep(Duration::from_millis(250));
futures::executor::block_on(data_state.update_data());
first_run = false; first_run = false;
} else {
thread::sleep(Duration::from_millis(update_rate_in_milliseconds as u64));
} }
tx.send(Event::Update(Box::from(data_state.data.clone())))
.unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it
thread::sleep(Duration::from_millis(update_rate_in_milliseconds as u64));
} }
}); });
} }
@ -277,12 +278,14 @@ fn main() -> error::Result<()> {
// NOTE TO SELF - data is refreshed into app state HERE! That means, if it is // NOTE TO SELF - data is refreshed into app state HERE! That means, if it is
// frozen, then, app.data is never refreshed, until unfrozen! // frozen, then, app.data is never refreshed, until unfrozen!
if !app.is_frozen { if !app.is_frozen {
app.data_collection.eat_data(&data);
app.data = *data; app.data = *data;
handle_process_sorting(&mut app); handle_process_sorting(&mut app);
// Convert all data into tui components // Convert all data into tui components
let network_data = update_network_data_points(&app.data); let network_data = convert_network_data_points(&app.data_collection);
app.canvas_data.network_data_rx = network_data.rx; app.canvas_data.network_data_rx = network_data.rx;
app.canvas_data.network_data_tx = network_data.tx; app.canvas_data.network_data_tx = network_data.tx;
app.canvas_data.rx_display = network_data.rx_display; app.canvas_data.rx_display = network_data.rx_display;
@ -303,9 +306,9 @@ fn main() -> error::Result<()> {
} }
// Quick fix for tab updating the table headers // Quick fix for tab updating the table headers
if let data_collection::processes::ProcessSorting::PID = &app.process_sorting_type { if let data_harvester::processes::ProcessSorting::PID = &app.process_sorting_type {
if app.is_grouped() { if app.is_grouped() {
app.process_sorting_type = data_collection::processes::ProcessSorting::CPU; // Go back to default, negate PID for group app.process_sorting_type = data_harvester::processes::ProcessSorting::CPU; // Go back to default, negate PID for group
app.process_sorting_reverse = true; app.process_sorting_reverse = true;
} }
} }
@ -372,14 +375,14 @@ fn handle_process_sorting(app: &mut app::App) {
); );
if let Some(grouped_list_of_processes) = &mut app.data.grouped_list_of_processes { if let Some(grouped_list_of_processes) = &mut app.data.grouped_list_of_processes {
if let data_collection::processes::ProcessSorting::PID = &app.process_sorting_type { if let data_harvester::processes::ProcessSorting::PID = &app.process_sorting_type {
data_collection::processes::sort_processes( data_harvester::processes::sort_processes(
grouped_list_of_processes, grouped_list_of_processes,
&data_collection::processes::ProcessSorting::CPU, // Go back to default, negate PID for group &data_harvester::processes::ProcessSorting::CPU, // Go back to default, negate PID for group
true, true,
); );
} else { } else {
data_collection::processes::sort_processes( data_harvester::processes::sort_processes(
grouped_list_of_processes, grouped_list_of_processes,
&app.process_sorting_type, &app.process_sorting_type,
app.process_sorting_reverse, app.process_sorting_reverse,
@ -387,7 +390,7 @@ fn handle_process_sorting(app: &mut app::App) {
} }
} }
data_collection::processes::sort_processes( data_harvester::processes::sort_processes(
&mut app.data.list_of_processes, &mut app.data.list_of_processes,
&app.process_sorting_type, &app.process_sorting_type,
app.process_sorting_reverse, app.process_sorting_reverse,