Rudimentary charting for cpu and mem.

ClementTsang 2019-09-11 00:52:51 -04:00
parent 0d76c49973
commit 2032660230
4 changed files with 105 additions and 28 deletions


@@ -1,6 +1,6 @@
# rustop
A [gotop](https://github.com/cjbassi/gotop) clone, written in Rust.
A top clone, written in Rust. Inspired by both [gtop](https://github.com/aksakalli/gtop) and [gotop](https://github.com/cjbassi/gotop).
## Installation
@@ -18,7 +18,7 @@ Currently, I'm unable to test on MacOS, so I'm not sure how well this will work,
## Thanks
* As mentioned, this project is most definitely inspired by [gotop](https://github.com/cjbassi/gotop).
* As mentioned, this project is very much inspired by both [gotop](https://github.com/cjbassi/gotop) and [gtop](https://github.com/aksakalli/gtop).
* This application was written with the following libraries:
* [crossterm](https://github.com/TimonPost/crossterm)


@@ -12,6 +12,10 @@
* Keybindings
* FIX PROCESSES AHHHHHH
* Refactor everything because it's a mess
* Test for Windows and macOS support
* Efficiency!!! Make sure no wasted hashmaps, use references, etc.


@@ -19,6 +19,8 @@ enum Event<I> {
Update(Box<widgets::Data>),
}
const STALE_MAX_SECONDS : u64 = 60;
#[tokio::main]
async fn main() -> Result<(), io::Error> {
let screen = AlternateScreen::to_alternate(true)?;
@@ -52,6 +54,7 @@ async fn main() -> Result<(), io::Error> {
// Event loop
let mut data_state = widgets::DataState::default();
data_state.set_stale_max_seconds(STALE_MAX_SECONDS);
{
let tx = tx.clone();
thread::spawn(move || {
@@ -176,11 +179,16 @@ fn draw_data<B : tui::backend::Backend>(terminal : &mut Terminal<B>, app_data :
.margin(0)
.constraints([Constraint::Percentage(40), Constraint::Percentage(60)].as_ref())
.split(vertical_chunks[1]);
let middle_divided_chunk = Layout::default()
let middle_divided_chunk_1 = Layout::default()
.direction(Direction::Vertical)
.margin(0)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(middle_chunks[0]);
let middle_divided_chunk_2 = Layout::default()
.direction(Direction::Vertical)
.margin(0)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(middle_chunks[1]);
let bottom_chunks = Layout::default()
.direction(Direction::Horizontal)
.margin(0)
@@ -190,38 +198,70 @@ fn draw_data<B : tui::backend::Backend>(terminal : &mut Terminal<B>, app_data :
// Set up blocks and their components
// CPU usage graph
let x_axis : Axis<String> = Axis::default().style(Style::default().fg(Color::White)).bounds([0.0, 10.0]);
let y_axis : Axis<String> = Axis::default().style(Style::default().fg(Color::White)).bounds([0.0, 10.0]);
Chart::default()
.block(Block::default().title("CPU Usage").borders(Borders::ALL))
.x_axis(x_axis)
.y_axis(y_axis)
.datasets(&[Dataset::default()
.name("data1")
.marker(Marker::Dot)
.style(Style::default().fg(Color::Cyan))
.data(&[(0.0, 5.0), (1.0, 6.0), (1.5, 6.434)])])
.render(&mut f, top_chunks[0]);
{
let x_axis : Axis<String> = Axis::default().style(Style::default().fg(Color::White)).bounds([0.0, 60.0]);
let y_axis = Axis::default().style(Style::default().fg(Color::White)).bounds([0.0, 100.0]).labels(&["0.0", "50.0", "100.0"]);
Chart::default()
.block(Block::default().title("CPU Usage").borders(Borders::ALL))
.x_axis(x_axis)
.y_axis(y_axis)
.datasets(&[
Dataset::default()
.name("CPU0")
.marker(Marker::Braille)
.style(Style::default().fg(Color::Cyan))
.data(&convert_cpu_data(0, &app_data.list_of_cpu_packages)),
Dataset::default()
.name("CPU1")
.marker(Marker::Braille)
.style(Style::default().fg(Color::LightMagenta))
.data(&convert_cpu_data(1, &app_data.list_of_cpu_packages)),
])
.render(&mut f, top_chunks[0]);
}
//Memory usage graph
Block::default().title("Memory Usage").borders(Borders::ALL).render(&mut f, top_chunks[1]);
{
let x_axis : Axis<String> = Axis::default().style(Style::default().fg(Color::White)).bounds([0.0, 60.0]);
let y_axis = Axis::default().style(Style::default().fg(Color::White)).bounds([0.0, 100.0]).labels(&["0.0", "50.0", "100.0"]);
Chart::default()
.block(Block::default().title("Memory Usage").borders(Borders::ALL))
.x_axis(x_axis)
.y_axis(y_axis)
.datasets(&[
Dataset::default()
.name("MEM")
.marker(Marker::Braille)
.style(Style::default().fg(Color::Cyan))
.data(&convert_mem_data(&app_data.memory)),
Dataset::default()
.name("SWAP")
.marker(Marker::Braille)
.style(Style::default().fg(Color::LightGreen))
.data(&convert_mem_data(&app_data.swap)),
])
.render(&mut f, top_chunks[1]);
}
// Temperature table
Table::new(["Sensor", "Temperature"].iter(), temperature_rows)
.block(Block::default().title("Temperatures").borders(Borders::ALL))
.header_style(Style::default().fg(Color::LightBlue))
.widths(&[15, 5])
.render(&mut f, middle_divided_chunk[0]);
.render(&mut f, middle_divided_chunk_1[0]);
// Disk usage table
Table::new(["Disk", "Mount", "Used", "Total", "Free"].iter(), disk_rows)
.block(Block::default().title("Disk Usage").borders(Borders::ALL))
.header_style(Style::default().fg(Color::LightBlue))
.widths(&[15, 10, 5, 5, 5])
.render(&mut f, middle_divided_chunk[1]);
.render(&mut f, middle_divided_chunk_1[1]);
// Temp graph
Block::default().title("Temperatures").borders(Borders::ALL).render(&mut f, middle_divided_chunk_2[0]);
// IO graph
Block::default().title("IO Usage").borders(Borders::ALL).render(&mut f, middle_chunks[1]);
Block::default().title("IO Usage").borders(Borders::ALL).render(&mut f, middle_divided_chunk_2[1]);
// Network graph
Block::default().title("Network").borders(Borders::ALL).render(&mut f, bottom_chunks[0]);
@@ -237,6 +277,35 @@ fn draw_data<B : tui::backend::Backend>(terminal : &mut Terminal<B>, app_data :
Ok(())
}
// TODO: Remove this count, this is for testing, lol
fn convert_cpu_data(count : usize, cpu_data : &[widgets::cpu::CPUPackage]) -> Vec<(f64, f64)> {
let mut result : Vec<(f64, f64)> = Vec::new();
let current_time = std::time::Instant::now();
for data in cpu_data {
result.push((
STALE_MAX_SECONDS as f64 - current_time.duration_since(data.instant).as_secs() as f64,
f64::from(data.cpu_vec[count + 1].cpu_usage),
));
}
result
}
fn convert_mem_data(mem_data : &[widgets::mem::MemData]) -> Vec<(f64, f64)> {
let mut result : Vec<(f64, f64)> = Vec::new();
let current_time = std::time::Instant::now();
for data in mem_data {
result.push((
STALE_MAX_SECONDS as f64 - current_time.duration_since(data.instant).as_secs() as f64,
data.mem_used_in_mb as f64 / data.mem_total_in_mb as f64 * 100_f64,
));
}
result
}
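
Both converters use the same axis conventions as the charts above: a point's x-coordinate is its age subtracted from the 60-second window (so the newest samples sit at the right edge of the [0.0, 60.0] x-axis), and the y-value is a percentage to match the [0.0, 100.0] y-axis. A minimal sketch of that mapping, using hypothetical values rather than anything from this commit:

// Editorial sketch, not part of this diff: a memory sample taken 10 seconds
// ago with 4096 MB used out of 8192 MB total becomes the chart point
// (60 - 10, 4096 / 8192 * 100) = (50.0, 50.0).
fn example_chart_point(age_in_seconds: u64, used_mb: u64, total_mb: u64) -> (f64, f64) {
    let x = 60_f64 - age_in_seconds as f64;
    let y = used_mb as f64 / total_mb as f64 * 100_f64;
    (x, y)
}
// example_chart_point(10, 4096, 8192) == (50.0, 50.0)
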
fn init_logger() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, record| {


@@ -44,6 +44,7 @@ pub struct Data {
pub struct DataState {
pub data : Data,
sys : System,
stale_max_seconds : u64,
}
impl Default for DataState {
@@ -51,18 +52,20 @@ impl Default for DataState {
DataState {
data : Data::default(),
sys : System::new(),
stale_max_seconds : 60,
}
}
}
impl DataState {
pub fn set_stale_max_seconds(&mut self, stale_max_seconds : u64) {
self.stale_max_seconds = stale_max_seconds;
}
pub async fn update_data(&mut self) {
debug!("Start updating...");
self.sys.refresh_system();
self.sys.refresh_network();
const STALE_MAX_SECONDS : u64 = 60;
// What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update!
push_if_valid(&network::get_network_data(&self.sys), &mut self.data.network);
push_if_valid(&cpu::get_cpu_data_list(&self.sys), &mut self.data.list_of_cpu_packages);
@@ -77,14 +80,15 @@ impl DataState {
push_if_valid(&disks::get_io_usage_list(true).await, &mut self.data.list_of_physical_io);
set_if_valid(&temperature::get_temperature_data().await, &mut self.data.list_of_temperature);
// Filter out stale timed entries...
// Filter out stale timed entries
// TODO: ideally make this a generic function!
let current_instant = std::time::Instant::now();
self.data.list_of_cpu_packages = self
.data
.list_of_cpu_packages
.iter()
.cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= STALE_MAX_SECONDS)
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
.collect::<Vec<_>>();
self.data.memory = self
@@ -92,7 +96,7 @@ impl DataState {
.memory
.iter()
.cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= STALE_MAX_SECONDS)
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
.collect::<Vec<_>>();
self.data.swap = self
@@ -100,7 +104,7 @@ impl DataState {
.swap
.iter()
.cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= STALE_MAX_SECONDS)
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
.collect::<Vec<_>>();
self.data.network = self
@@ -108,7 +112,7 @@ impl DataState {
.network
.iter()
.cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= STALE_MAX_SECONDS)
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
.collect::<Vec<_>>();
self.data.list_of_io = self
@@ -116,7 +120,7 @@ impl DataState {
.list_of_io
.iter()
.cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= STALE_MAX_SECONDS)
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
.collect::<Vec<_>>();
self.data.list_of_physical_io = self
@@ -124,7 +128,7 @@ impl DataState {
.list_of_physical_io
.iter()
.cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= STALE_MAX_SECONDS)
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds)
.collect::<Vec<_>>();
debug!("End updating...");