Got processing switched to heim
parent f9b98c71ec
commit ace6a4bc68

Cargo.toml
@@ -8,6 +8,8 @@ edition = "2018"
 [dependencies]
 futures-preview = "0.3.0-alpha.18"
 futures-timer = "0.3"
 futures-util = "0.2.1"
+heim = "0.0.7"
+heim-common = "0.0.7"
 sysinfo = "0.9.4"
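heim 0.0.7 and heim-common 0.0.7 are the new entries here, joining the futures-preview alpha stack. None of the hunks in this commit show how the `async fn main` below is driven; if no runtime attribute macro is involved, one possibility (purely a sketch, assuming only the crates listed above) is to block on the async code from a synchronous `main` using futures-preview's executor:

```rust
// Sketch only: one way to drive async code with nothing but futures-preview.
// The commit itself does not show which executor/runtime the project uses.
use futures::executor::block_on;

async fn gather_data() -> Result<(), Box<dyn std::error::Error>> {
    // ... the process/disk/CPU collection from src/main.rs would live here ...
    Ok(())
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    block_on(gather_data())
}
```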

src/main.rs (29 changed lines)

@@ -12,21 +12,24 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
     let get_physical_io = false;
     let mut sys = System::new();

-    let mut list_of_timed_processes : Vec<cpu::TimedCPUPackages> = Vec::new();
+    let mut list_of_timed_cpu_packages : Vec<cpu::TimedCPUPackages> = Vec::new();
     let mut list_of_timed_io : Vec<Vec<disks::TimedIOInfo>> = Vec::new();
     let mut list_of_timed_physical_io : Vec<Vec<disks::TimedIOInfo>> = Vec::new();

     loop {
-        dbg!("Start data loop...");
+        println!("Start data loop...");
         sys.refresh_system();

-        // Get data, potentially store?
-        //let list_of_processes = processes::get_sorted_processes_list(processes::ProcessSorting::NAME, true);
+        // TODO: Get data, potentially store? Use a result to check!
+        let list_of_processes = processes::get_sorted_processes_list(processes::ProcessSorting::NAME, true).await;
+        for process in list_of_processes {
+            println!("Process: {} with PID {}, CPU: {}, MEM: {}", process.command, process.pid, process.cpu_usage, process.mem_usage,);
+        }

         let list_of_disks = disks::get_disk_usage_list().await?;

         for disk in list_of_disks {
-            dbg!("{} is mounted on {}: {}/{} free.", disk.name, disk.mount_point, disk.avail_space as f64, disk.total_space as f64);
+            println!("{} is mounted on {}: {}/{} free.", disk.name, disk.mount_point, disk.avail_space as f64, disk.total_space as f64);
+            // TODO: Check if this is valid
         }
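A recurring change in this hunk (and the next) is `dbg!` being replaced by `println!`. The commit message does not say why, but the likely reason is that `dbg!` does not interpolate format strings: it echoes each expression with its Debug representation and the file/line, so the `{}` placeholders above would have printed literally. A minimal illustration:

```rust
fn main() {
    // dbg! prints the expression and its Debug value plus file:line;
    // the braces are not treated as format placeholders.
    dbg!("IO counter for {}");
    // println! performs the formatting the loop actually wants.
    println!("IO counter for {}", "sda1");
}
```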

@@ -35,26 +38,26 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
         if !list_of_timed_io.is_empty() {
             for io in list_of_timed_io.last().unwrap() {
-                dbg!("IO counter for {} at {:?}: {} writes, {} reads.", &io.mount_point, io.time, io.write_bytes, io.read_bytes);
+                println!("IO counter for {} at {:?}: {} writes, {} reads.", &io.mount_point, io.time, io.write_bytes, io.read_bytes);
             }
         }
         if !list_of_timed_physical_io.is_empty() {
             for io in list_of_timed_physical_io.last().unwrap() {
-                dbg!("Physical IO counter for {} at {:?}: {} writes, {} reads.", &io.mount_point, io.time, io.write_bytes, io.read_bytes);
+                println!("Physical IO counter for {} at {:?}: {} writes, {} reads.", &io.mount_point, io.time, io.write_bytes, io.read_bytes);
             }
         }

-        list_of_timed_processes.push(cpu::get_cpu_data_list(&sys));
+        list_of_timed_cpu_packages.push(cpu::get_cpu_data_list(&sys));

-        if !list_of_timed_processes.is_empty() {
-            let current_cpu_time = list_of_timed_processes.last().unwrap().time;
-            for cpu in &list_of_timed_processes.last().unwrap().processor_list {
-                dbg!("CPU {} has {}% usage at timestamp {:?}!", &cpu.cpu_name, cpu.cpu_usage, current_cpu_time);
+        if !list_of_timed_cpu_packages.is_empty() {
+            let current_cpu_time = list_of_timed_cpu_packages.last().unwrap().time;
+            for cpu in &list_of_timed_cpu_packages.last().unwrap().processor_list {
+                println!("CPU {} has {}% usage at timestamp {:?}!", &cpu.cpu_name, cpu.cpu_usage, current_cpu_time);
             }
         }

         // Send to drawing module
-        dbg!("End data loop...");
+        println!("End data loop...");
         window::draw_terminal();

         // Repeat on interval
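The loop still ends at a bare "// Repeat on interval" comment. One way to fill that in later with the futures-timer crate this commit already uses for CPU sampling is a small delay helper; the name `wait_for_next_tick` and the millisecond parameter are illustrative, not part of the commit:

```rust
// Illustrative helper for the "Repeat on interval" TODO; awaiting it at the
// bottom of the data loop pauses before the next iteration.
async fn wait_for_next_tick(interval_ms: u64) {
    // Delay's result is discarded in this sketch.
    let _ = futures_timer::Delay::new(std::time::Duration::from_millis(interval_ms)).await;
}
```

It would be awaited right after `window::draw_terminal()`, e.g. `wait_for_next_tick(1000).await;`.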

@@ -53,14 +53,14 @@ pub async fn get_disk_usage_list() -> Result<Vec<DiskInfo>, heim::Error> {
     let mut partitions_stream = heim::disk::partitions_physical();

     while let Some(part) = partitions_stream.next().await {
-        let part = part?;
-        let usage = heim::disk::usage(part.mount_point().to_path_buf()).await?;
+        let partition = part?; // TODO: Change this? We don't want to error out immediately...
+        let usage = heim::disk::usage(partition.mount_point().to_path_buf()).await?;

         vec_disks.push(DiskInfo {
             avail_space : usage.free().get::<heim_common::units::information::megabyte>(),
             total_space : usage.total().get::<heim_common::units::information::megabyte>(),
-            mount_point : Box::from(part.mount_point().to_str().unwrap_or("Name Unavailable")),
-            name : Box::from(part.device().unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable")).to_str().unwrap_or("Name Unavailable")),
+            mount_point : Box::from(partition.mount_point().to_str().unwrap_or("Name Unavailable")),
+            name : Box::from(partition.device().unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable")).to_str().unwrap_or("Name Unavailable")),
         });
     }
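The TODO on the partition line above asks how to avoid aborting the whole listing when a single partition fails to resolve. A minimal sketch of that idea, skipping `Err` entries instead of propagating them with `?` (the function name `get_mount_points_lossy` is made up for illustration):

```rust
use heim_common::prelude::StreamExt;

// Sketch: collect mount points, silently skipping partitions that error out
// instead of returning early from the whole function.
pub async fn get_mount_points_lossy() -> Vec<String> {
    let mut partitions_stream = heim::disk::partitions_physical();
    let mut mount_points = Vec::new();
    while let Some(part) = partitions_stream.next().await {
        if let Ok(partition) = part {
            mount_points.push(partition.mount_point().to_string_lossy().into_owned());
        }
        // Err(_) partitions are ignored here rather than propagated with `?`.
    }
    mount_points
}
```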

@@ -1,4 +1,7 @@
-use heim_common::prelude::StreamExt;
+use heim_common::{
+    prelude::{StreamExt, TryStreamExt},
+    units,
+};

 pub enum ProcessSorting {
     CPU,

@@ -10,12 +13,10 @@ pub enum ProcessSorting {
 // Possible process info struct?
 #[derive(Debug)]
 pub struct ProcessInfo {
-    pid : u32,
-    cpu_usage : f32,
-    mem_usage : u64,
-    uptime : u64,
-    command : Box<str>,
-    // TODO: Env?
+    pub pid : u32,
+    pub cpu_usage : f32,
+    pub mem_usage : u64,
+    pub command : String,
 }

 fn get_ordering<T : std::cmp::PartialOrd>(a_val : T, b_val : T, reverse_order : bool) -> std::cmp::Ordering {
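The hunk above only shows `get_ordering`'s signature; its body lies outside the diff. For reference, a comparator with this signature and the call sites in the next hunk typically looks something like the following (an assumption, not code taken from the commit):

```rust
use std::cmp::Ordering;

// Hypothetical body consistent with the signature and call sites shown here.
fn get_ordering<T: PartialOrd>(a_val: T, b_val: T, reverse_order: bool) -> Ordering {
    match a_val.partial_cmp(&b_val) {
        Some(order) => {
            if reverse_order {
                order.reverse()
            } else {
                order
            }
        }
        None => Ordering::Equal, // e.g. NaN CPU percentages compare as equal
    }
}
```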

@@ -40,20 +41,36 @@ fn get_ordering<T : std::cmp::PartialOrd>(a_val : T, b_val : T, reverse_order :
     }
 }

+async fn cpu_usage(process : heim::process::Process) -> heim::process::ProcessResult<(heim::process::Process, heim_common::units::Ratio)> {
+    let usage_1 = process.cpu_usage().await?;
+    futures_timer::Delay::new(std::time::Duration::from_millis(100)).await?;
+    let usage_2 = process.cpu_usage().await?;
+
+    Ok((process, usage_2 - usage_1))
+}
+
 pub async fn get_sorted_processes_list(sorting_method : ProcessSorting, reverse_order : bool) -> Vec<ProcessInfo> {
-    let mut process_stream = heim::process::processes();
+    let mut process_stream = heim::process::processes().map_ok(cpu_usage).try_buffer_unordered(std::usize::MAX);

     // TODO: Evaluate whether this is too slow!
     // TODO: Should I filter out blank command names?

     let mut process_vector : Vec<ProcessInfo> = Vec::new();
-    while let Some(process) = process_stream.next().await {}
+    while let Some(process) = process_stream.next().await {
+        let (process, cpu_usage) = process.unwrap();
+        let mem_measurement = process.memory().await.unwrap();
+        process_vector.push(ProcessInfo {
+            command : process.name().await.unwrap_or_else(|_| "".to_string()),
+            pid : process.pid() as u32,
+            cpu_usage : cpu_usage.get::<units::ratio::percent>(),
+            mem_usage : mem_measurement.rss().get::<units::information::megabyte>(),
+        });
+    }
     match sorting_method {
-        ProcessSorting::CPU => process_vector.sort_by(|a, b| get_ordering(1, 2, reverse_order)),
-        ProcessSorting::MEM => process_vector.sort_by(|a, b| get_ordering(1, 2, reverse_order)),
-        ProcessSorting::PID => process_vector.sort_by(|a, b| get_ordering(1, 2, reverse_order)),
-        ProcessSorting::NAME => process_vector.sort_by(|a, b| get_ordering(1, 2, reverse_order)),
+        ProcessSorting::CPU => process_vector.sort_by(|a, b| get_ordering(a.cpu_usage, b.cpu_usage, reverse_order)),
+        ProcessSorting::MEM => process_vector.sort_by(|a, b| get_ordering(a.mem_usage, b.mem_usage, reverse_order)),
+        ProcessSorting::PID => process_vector.sort_by(|a, b| get_ordering(a.pid, b.pid, reverse_order)),
+        ProcessSorting::NAME => process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, reverse_order)),
     }

     process_vector
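The new `process_stream` chains `map_ok(cpu_usage)` to turn every successfully enumerated process into a future that samples CPU usage twice, 100 ms apart, and `try_buffer_unordered(std::usize::MAX)` to poll those futures concurrently so the per-process delays overlap instead of adding up; `next()` then yields each `(process, usage delta)` as it completes. A stand-alone sketch of that combinator shape on plain futures types, with heim swapped out for dummy data (the `demo` name, the numbers, and the `()` error type are all illustrative):

```rust
use futures::executor::block_on;
use futures::stream::{self, StreamExt, TryStreamExt};

// Each Ok item is mapped to an async "measurement", and the resulting futures
// are buffered and polled concurrently, just like the process pipeline above.
async fn demo() {
    let mut results = stream::iter(vec![Ok::<u32, ()>(1), Ok(2), Ok(3)])
        .map_ok(|n| async move { Ok::<u32, ()>(n * 10) }) // pretend measurement
        .try_buffer_unordered(usize::MAX);

    while let Some(item) = results.next().await {
        println!("measured: {:?}", item); // Ok(10), Ok(20), Ok(30), in completion order
    }
}

fn main() {
    block_on(demo());
}
```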